From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.6 commit in: /
Date: Wed, 29 Apr 2020 17:55:30 +0000 (UTC)
Message-ID: <1588182914.9073e1453c396ffd5d5019142d436ff31e4826b4.mpagano@gentoo>

commit:     9073e1453c396ffd5d5019142d436ff31e4826b4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 29 17:55:14 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 29 17:55:14 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9073e145

Linux patch 5.6.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1007_linux-5.6.8.patch | 6394 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6398 insertions(+)
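
The 0000_README hunk below registers 1007_linux-5.6.8.patch as the incremental update on top of 5.6.7. Applied by hand to a plain 5.6.7 tree, that would look roughly like the sketch below; the paths are assumptions, not part of this repository:

  cd /usr/src/linux-5.6.7                               # wherever the 5.6.7 sources live
  patch -p1 < ../linux-patches/1007_linux-5.6.8.patch   # -p1 strips the a/ and b/ prefixes used in the patch
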

diff --git a/0000_README b/0000_README
index 8000cff..d756ad3 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-5.6.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.6.7
 
+Patch:  1007_linux-5.6.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.6.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-5.6.8.patch b/1007_linux-5.6.8.patch
new file mode 100644
index 0000000..50e5e7d
--- /dev/null
+++ b/1007_linux-5.6.8.patch
@@ -0,0 +1,6394 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 6ba631cc5a56..20aac805e197 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5085,8 +5085,7 @@
+ 
+ 	usbcore.old_scheme_first=
+ 			[USB] Start with the old device initialization
+-			scheme,  applies only to low and full-speed devices
+-			 (default 0 = off).
++			scheme (default 0 = off).
+ 
+ 	usbcore.usbfs_memory_mb=
+ 			[USB] Memory limit (in MB) for buffers allocated by
+diff --git a/Makefile b/Makefile
+index b64df959e5d7..e7101c99d81b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 6
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
+index 03506ce46149..e7364e6c8c6b 100644
+--- a/arch/arm/mach-imx/Makefile
++++ b/arch/arm/mach-imx/Makefile
+@@ -91,8 +91,10 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
+ obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
+ obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
+ endif
++ifeq ($(CONFIG_ARM_CPU_SUSPEND),y)
+ AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
+ obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
++endif
+ obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
+ 
+ obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 16af0d8d90a8..89e7f891bcd0 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -710,7 +710,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
+ 	stw	r10,_CCR(r1)
+ 	stw	r1,KSP(r3)	/* Set old stack pointer */
+ 
+-	kuap_check r2, r4
++	kuap_check r2, r0
+ #ifdef CONFIG_SMP
+ 	/* We need a sync somewhere here to make sure that if the
+ 	 * previous task gets rescheduled on another CPU, it sees all
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 438a9befce41..8105010b0e76 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -534,6 +534,8 @@ static bool __init parse_cache_info(struct device_node *np,
+ 	lsizep = of_get_property(np, propnames[3], NULL);
+ 	if (bsizep == NULL)
+ 		bsizep = lsizep;
++	if (lsizep == NULL)
++		lsizep = bsizep;
+ 	if (lsizep != NULL)
+ 		lsize = be32_to_cpu(*lsizep);
+ 	if (bsizep != NULL)
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 1168e8b37e30..716f8d0960a7 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -522,35 +522,6 @@ static inline void clear_irq_work_pending(void)
+ 		"i" (offsetof(struct paca_struct, irq_work_pending)));
+ }
+ 
+-void arch_irq_work_raise(void)
+-{
+-	preempt_disable();
+-	set_irq_work_pending_flag();
+-	/*
+-	 * Non-nmi code running with interrupts disabled will replay
+-	 * irq_happened before it re-enables interrupts, so setthe
+-	 * decrementer there instead of causing a hardware exception
+-	 * which would immediately hit the masked interrupt handler
+-	 * and have the net effect of setting the decrementer in
+-	 * irq_happened.
+-	 *
+-	 * NMI interrupts can not check this when they return, so the
+-	 * decrementer hardware exception is raised, which will fire
+-	 * when interrupts are next enabled.
+-	 *
+-	 * BookE does not support this yet, it must audit all NMI
+-	 * interrupt handlers to ensure they call nmi_enter() so this
+-	 * check would be correct.
+-	 */
+-	if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
+-		set_dec(1);
+-	} else {
+-		hard_irq_disable();
+-		local_paca->irq_happened |= PACA_IRQ_DEC;
+-	}
+-	preempt_enable();
+-}
+-
+ #else /* 32-bit */
+ 
+ DEFINE_PER_CPU(u8, irq_work_pending);
+@@ -559,16 +530,27 @@ DEFINE_PER_CPU(u8, irq_work_pending);
+ #define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
+ #define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
+ 
++#endif /* 32 vs 64 bit */
++
+ void arch_irq_work_raise(void)
+ {
++	/*
++	 * 64-bit code that uses irq soft-mask can just cause an immediate
++	 * interrupt here that gets soft masked, if this is called under
++	 * local_irq_disable(). It might be possible to prevent that happening
++	 * by noticing interrupts are disabled and setting decrementer pending
++	 * to be replayed when irqs are enabled. The problem there is that
++	 * tracing can call irq_work_raise, including in code that does low
++	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
++	 * which could get tangled up if we're messing with the same state
++	 * here.
++	 */
+ 	preempt_disable();
+ 	set_irq_work_pending_flag();
+ 	set_dec(1);
+ 	preempt_enable();
+ }
+ 
+-#endif /* 32 vs 64 bit */
+-
+ #else  /* CONFIG_IRQ_WORK */
+ 
+ #define test_irq_work_pending()	0
+diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
+index 3189308dece4..d83a12c5bc7f 100644
+--- a/arch/powerpc/mm/nohash/8xx.c
++++ b/arch/powerpc/mm/nohash/8xx.c
+@@ -185,6 +185,7 @@ void mmu_mark_initmem_nx(void)
+ 			mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
+ 		}
+ 	}
++	_tlbil_all();
+ }
+ 
+ #ifdef CONFIG_STRICT_KERNEL_RWX
+@@ -199,6 +200,8 @@ void mmu_mark_rodata_ro(void)
+ 				      ~(LARGE_PAGE_SIZE_8M - 1)));
+ 	mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
+ 
++	_tlbil_all();
++
+ 	/* Update page tables for PTDUMP and BDI */
+ 	mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
+ 	mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
+diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
+index 6caedc88474f..3b5ffc92715d 100644
+--- a/arch/powerpc/platforms/Kconfig.cputype
++++ b/arch/powerpc/platforms/Kconfig.cputype
+@@ -397,7 +397,7 @@ config PPC_KUAP
+ 
+ config PPC_KUAP_DEBUG
+ 	bool "Extra debugging for Kernel Userspace Access Protection"
+-	depends on PPC_HAVE_KUAP && (PPC_RADIX_MMU || PPC_32)
++	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
+ 	help
+ 	  Add extra debugging for Kernel Userspace Access Protection (KUAP)
+ 	  If you're unsure, say N.
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 1d7f973c647b..43710b69e09e 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -683,6 +683,17 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+ #endif
+ 
+ out:
++	/*
++	 * Enable translation as we will be accessing per-cpu variables
++	 * in save_mce_event() which may fall outside RMO region, also
++	 * leave it enabled because subsequently we will be queuing work
++	 * to workqueues where again per-cpu variables accessed, besides
++	 * fwnmi_release_errinfo() crashes when called in realmode on
++	 * pseries.
++	 * Note: All the realmode handling like flushing SLB entries for
++	 *       SLB multihit is done by now.
++	 */
++	mtmsr(mfmsr() | MSR_IR | MSR_DR);
+ 	save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
+ 			&mce_err, regs->nip, eaddr, paddr);
+ 
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index c2e6d4ba4e23..198a6b320018 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -1930,6 +1930,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
+ 			start = slot + 1;
+ 	}
+ 
++	if (start >= slots->used_slots)
++		return slots->used_slots - 1;
++
+ 	if (gfn >= memslots[start].base_gfn &&
+ 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
+ 		atomic_set(&slots->lru_slot, start);
+diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
+index c4f8039a35e8..0267405ab7c6 100644
+--- a/arch/s390/lib/uaccess.c
++++ b/arch/s390/lib/uaccess.c
+@@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void)
+ {
+ 	mm_segment_t old_fs;
+ 	unsigned long asce, cr;
++	unsigned long flags;
+ 
+ 	old_fs = current->thread.mm_segment;
+ 	if (old_fs & 1)
+ 		return old_fs;
++	/* protect against a concurrent page table upgrade */
++	local_irq_save(flags);
+ 	current->thread.mm_segment |= 1;
+ 	asce = S390_lowcore.kernel_asce;
+ 	if (likely(old_fs == USER_DS)) {
+@@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void)
+ 		__ctl_load(asce, 7, 7);
+ 		set_cpu_flag(CIF_ASCE_SECONDARY);
+ 	}
++	local_irq_restore(flags);
+ 	return old_fs;
+ }
+ EXPORT_SYMBOL(enable_sacf_uaccess);
+diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
+index 3dd253f81a77..46071be897ab 100644
+--- a/arch/s390/mm/pgalloc.c
++++ b/arch/s390/mm/pgalloc.c
+@@ -70,8 +70,20 @@ static void __crst_table_upgrade(void *arg)
+ {
+ 	struct mm_struct *mm = arg;
+ 
+-	if (current->active_mm == mm)
+-		set_user_asce(mm);
++	/* we must change all active ASCEs to avoid the creation of new TLBs */
++	if (current->active_mm == mm) {
++		S390_lowcore.user_asce = mm->context.asce;
++		if (current->thread.mm_segment == USER_DS) {
++			__ctl_load(S390_lowcore.user_asce, 1, 1);
++			/* Mark user-ASCE present in CR1 */
++			clear_cpu_flag(CIF_ASCE_PRIMARY);
++		}
++		if (current->thread.mm_segment == USER_DS_SACF) {
++			__ctl_load(S390_lowcore.user_asce, 7, 7);
++			/* enable_sacf_uaccess does all or nothing */
++			WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
++		}
++	}
+ 	__tlb_flush_local();
+ }
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 0a7867897507..c1ffe7d24f83 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -4571,7 +4571,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
+  */
+ static void kvm_machine_check(void)
+ {
+-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
++#if defined(CONFIG_X86_MCE)
+ 	struct pt_regs regs = {
+ 		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
+ 		.flags = X86_EFLAGS_IF,
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index 564fae77711d..ebe4c2e9834b 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -468,7 +468,7 @@ int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev)
+ 
+ 	if (!disk_part_scan_enabled(disk))
+ 		return 0;
+-	if (bdev->bd_part_count || bdev->bd_super)
++	if (bdev->bd_part_count || bdev->bd_openers > 1)
+ 		return -EBUSY;
+ 	res = invalidate_partition(disk, 0);
+ 	if (res)
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 739b372a5112..d943e713d5e3 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -427,11 +427,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
+ 	 * information.
+ 	 */
+ 	struct file *file = lo->lo_backing_file;
++	struct request_queue *q = lo->lo_queue;
+ 	int ret;
+ 
+ 	mode |= FALLOC_FL_KEEP_SIZE;
+ 
+-	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
++	if (!blk_queue_discard(q)) {
+ 		ret = -EOPNOTSUPP;
+ 		goto out;
+ 	}
+@@ -865,28 +866,47 @@ static void loop_config_discard(struct loop_device *lo)
+ 	struct inode *inode = file->f_mapping->host;
+ 	struct request_queue *q = lo->lo_queue;
+ 
++	/*
++	 * If the backing device is a block device, mirror its zeroing
++	 * capability. Set the discard sectors to the block device's zeroing
++	 * capabilities because loop discards result in blkdev_issue_zeroout(),
++	 * not blkdev_issue_discard(). This maintains consistent behavior with
++	 * file-backed loop devices: discarded regions read back as zero.
++	 */
++	if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
++		struct request_queue *backingq;
++
++		backingq = bdev_get_queue(inode->i_bdev);
++		blk_queue_max_discard_sectors(q,
++			backingq->limits.max_write_zeroes_sectors);
++
++		blk_queue_max_write_zeroes_sectors(q,
++			backingq->limits.max_write_zeroes_sectors);
++
+ 	/*
+ 	 * We use punch hole to reclaim the free space used by the
+ 	 * image a.k.a. discard. However we do not support discard if
+ 	 * encryption is enabled, because it may give an attacker
+ 	 * useful information.
+ 	 */
+-	if ((!file->f_op->fallocate) ||
+-	    lo->lo_encrypt_key_size) {
++	} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
+ 		q->limits.discard_granularity = 0;
+ 		q->limits.discard_alignment = 0;
+ 		blk_queue_max_discard_sectors(q, 0);
+ 		blk_queue_max_write_zeroes_sectors(q, 0);
+-		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
+-		return;
+-	}
+ 
+-	q->limits.discard_granularity = inode->i_sb->s_blocksize;
+-	q->limits.discard_alignment = 0;
++	} else {
++		q->limits.discard_granularity = inode->i_sb->s_blocksize;
++		q->limits.discard_alignment = 0;
+ 
+-	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+-	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+-	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
++		blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
++		blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
++	}
++
++	if (q->limits.max_write_zeroes_sectors)
++		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
++	else
++		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
+ }
+ 
+ static void loop_unprepare_queue(struct loop_device *lo)
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index a438b1206fcb..1621ce818705 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -323,7 +323,7 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ 
+ 	for (i = 0; i < chip->nr_allocated_banks; i++) {
+ 		if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
+-			rc = EINVAL;
++			rc = -EINVAL;
+ 			goto out;
+ 		}
+ 	}
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 78cc52690177..e82013d587b4 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (C) 2012 IBM Corporation
++ * Copyright (C) 2012-2020 IBM Corporation
+  *
+  * Author: Ashley Lai <ashleydlai@gmail.com>
+  *
+@@ -133,6 +133,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	return len;
+ }
+ 
++/**
++ * ibmvtpm_crq_send_init - Send a CRQ initialize message
++ * @ibmvtpm:	vtpm device struct
++ *
++ * Return:
++ *	0 on success.
++ *	Non-zero on failure.
++ */
++static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
++{
++	int rc;
++
++	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
++	if (rc != H_SUCCESS)
++		dev_err(ibmvtpm->dev,
++			"%s failed rc=%d\n", __func__, rc);
++
++	return rc;
++}
++
++/**
++ * tpm_ibmvtpm_resume - Resume from suspend
++ *
++ * @dev:	device struct
++ *
++ * Return: Always 0.
++ */
++static int tpm_ibmvtpm_resume(struct device *dev)
++{
++	struct tpm_chip *chip = dev_get_drvdata(dev);
++	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
++	int rc = 0;
++
++	do {
++		if (rc)
++			msleep(100);
++		rc = plpar_hcall_norets(H_ENABLE_CRQ,
++					ibmvtpm->vdev->unit_address);
++	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
++
++	if (rc) {
++		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
++		return rc;
++	}
++
++	rc = vio_enable_interrupts(ibmvtpm->vdev);
++	if (rc) {
++		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
++		return rc;
++	}
++
++	rc = ibmvtpm_crq_send_init(ibmvtpm);
++	if (rc)
++		dev_err(dev, "Error send_init rc=%d\n", rc);
++
++	return rc;
++}
++
+ /**
+  * tpm_ibmvtpm_send() - Send a TPM command
+  * @chip:	tpm chip struct
+@@ -146,6 +204,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+ 	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
++	bool retry = true;
+ 	int rc, sig;
+ 
+ 	if (!ibmvtpm->rtce_buf) {
+@@ -179,18 +238,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	 */
+ 	ibmvtpm->tpm_processing_cmd = true;
+ 
++again:
+ 	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ 			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
+ 			count, ibmvtpm->rtce_dma_handle);
+ 	if (rc != H_SUCCESS) {
++		/*
++		 * H_CLOSED can be returned after LPM resume.  Call
++		 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
++		 * ibmvtpm_send_crq() once before failing.
++		 */
++		if (rc == H_CLOSED && retry) {
++			tpm_ibmvtpm_resume(ibmvtpm->dev);
++			retry = false;
++			goto again;
++		}
+ 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+-		rc = 0;
+ 		ibmvtpm->tpm_processing_cmd = false;
+-	} else
+-		rc = 0;
++	}
+ 
+ 	spin_unlock(&ibmvtpm->rtce_lock);
+-	return rc;
++	return 0;
+ }
+ 
+ static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
+@@ -268,26 +336,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
+ 	return rc;
+ }
+ 
+-/**
+- * ibmvtpm_crq_send_init - Send a CRQ initialize message
+- * @ibmvtpm:	vtpm device struct
+- *
+- * Return:
+- *	0 on success.
+- *	Non-zero on failure.
+- */
+-static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
+-{
+-	int rc;
+-
+-	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
+-	if (rc != H_SUCCESS)
+-		dev_err(ibmvtpm->dev,
+-			"ibmvtpm_crq_send_init failed rc=%d\n", rc);
+-
+-	return rc;
+-}
+-
+ /**
+  * tpm_ibmvtpm_remove - ibm vtpm remove entry point
+  * @vdev:	vio device struct
+@@ -400,44 +448,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
+ 				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
+ }
+ 
+-/**
+- * tpm_ibmvtpm_resume - Resume from suspend
+- *
+- * @dev:	device struct
+- *
+- * Return: Always 0.
+- */
+-static int tpm_ibmvtpm_resume(struct device *dev)
+-{
+-	struct tpm_chip *chip = dev_get_drvdata(dev);
+-	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+-	int rc = 0;
+-
+-	do {
+-		if (rc)
+-			msleep(100);
+-		rc = plpar_hcall_norets(H_ENABLE_CRQ,
+-					ibmvtpm->vdev->unit_address);
+-	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+-
+-	if (rc) {
+-		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
+-		return rc;
+-	}
+-
+-	rc = vio_enable_interrupts(ibmvtpm->vdev);
+-	if (rc) {
+-		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+-		return rc;
+-	}
+-
+-	rc = ibmvtpm_crq_send_init(ibmvtpm);
+-	if (rc)
+-		dev_err(dev, "Error send_init rc=%d\n", rc);
+-
+-	return rc;
+-}
+-
+ static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
+ {
+ 	return (status == 0);
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 27c6ca031e23..2435216bd10a 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -433,6 +433,9 @@ static void disable_interrupts(struct tpm_chip *chip)
+ 	u32 intmask;
+ 	int rc;
+ 
++	if (priv->irq == 0)
++		return;
++
+ 	rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+ 	if (rc < 0)
+ 		intmask = 0;
+@@ -1062,9 +1065,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 		if (irq) {
+ 			tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
+ 						 irq);
+-			if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
++			if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+ 				dev_err(&chip->dev, FW_BUG
+ 					"TPM interrupt not working, polling instead\n");
++
++				disable_interrupts(chip);
++			}
+ 		} else {
+ 			tpm_tis_probe_irq(chip, intmask);
+ 		}
+diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
+index 89ca292236ad..538755062ab7 100644
+--- a/drivers/fpga/dfl-pci.c
++++ b/drivers/fpga/dfl-pci.c
+@@ -248,11 +248,13 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
+ 			return ret;
+ 
+ 		ret = pci_enable_sriov(pcidev, num_vfs);
+-		if (ret)
++		if (ret) {
+ 			dfl_fpga_cdev_config_ports_pf(cdev);
++			return ret;
++		}
+ 	}
+ 
+-	return ret;
++	return num_vfs;
+ }
+ 
+ static void cci_pci_remove(struct pci_dev *pcidev)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 04441dbcba76..188e51600070 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ 	int i = 0;
+ 	bool ret = false;
+ 
++	stream->adjust = *adjust;
++
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ 
+@@ -2347,7 +2349,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 	enum surface_update_type update_type;
+ 	struct dc_state *context;
+ 	struct dc_context *dc_ctx = dc->ctx;
+-	int i;
++	int i, j;
+ 
+ 	stream_status = dc_stream_get_status(stream);
+ 	context = dc->current_state;
+@@ -2385,6 +2387,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 
+ 		copy_surface_update_to_plane(surface, &srf_updates[i]);
+ 
++		if (update_type >= UPDATE_TYPE_MED) {
++			for (j = 0; j < dc->res_pool->pipe_count; j++) {
++				struct pipe_ctx *pipe_ctx =
++					&context->res_ctx.pipe_ctx[j];
++
++				if (pipe_ctx->plane_state != surface)
++					continue;
++
++				resource_build_scaling_params(pipe_ctx);
++			}
++		}
+ 	}
+ 
+ 	copy_stream_update_to_stream(dc, context, stream, stream_update);
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 7b7f0da01346..22713ef0eac8 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -4290,6 +4290,7 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+ 	if (pos->vcpi) {
+ 		drm_dp_mst_put_port_malloc(port);
+ 		pos->vcpi = 0;
++		pos->pbn = 0;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
+index b2d245963d9f..8accea06185b 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rps.c
++++ b/drivers/gpu/drm/i915/gt/intel_rps.c
+@@ -83,7 +83,8 @@ static void rps_enable_interrupts(struct intel_rps *rps)
+ 	gen6_gt_pm_enable_irq(gt, rps->pm_events);
+ 	spin_unlock_irq(&gt->irq_lock);
+ 
+-	set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq));
++	intel_uncore_write(gt->uncore,
++			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
+ }
+ 
+ static void gen6_rps_reset_interrupts(struct intel_rps *rps)
+@@ -117,7 +118,8 @@ static void rps_disable_interrupts(struct intel_rps *rps)
+ 
+ 	rps->pm_events = 0;
+ 
+-	set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
++	intel_uncore_write(gt->uncore,
++			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
+ 
+ 	spin_lock_irq(&gt->irq_lock);
+ 	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index 80c3f963527b..ae622ee6d08c 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -1418,8 +1418,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
+ static void stm32_adc_dma_buffer_done(void *data)
+ {
+ 	struct iio_dev *indio_dev = data;
++	struct stm32_adc *adc = iio_priv(indio_dev);
++	int residue = stm32_adc_dma_residue(adc);
++
++	/*
++	 * In DMA mode the trigger services of IIO are not used
++	 * (e.g. no call to iio_trigger_poll).
++	 * Calling irq handler associated to the hardware trigger is not
++	 * relevant as the conversions have already been done. Data
++	 * transfers are performed directly in DMA callback instead.
++	 * This implementation avoids to call trigger irq handler that
++	 * may sleep, in an atomic context (DMA irq handler context).
++	 */
++	dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi);
+ 
+-	iio_trigger_poll_chained(indio_dev->trig);
++	while (residue >= indio_dev->scan_bytes) {
++		u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi];
++
++		iio_push_to_buffers(indio_dev, buffer);
++
++		residue -= indio_dev->scan_bytes;
++		adc->bufi += indio_dev->scan_bytes;
++		if (adc->bufi >= adc->rx_buf_sz)
++			adc->bufi = 0;
++	}
+ }
+ 
+ static int stm32_adc_dma_start(struct iio_dev *indio_dev)
+@@ -1845,6 +1867,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ {
+ 	struct iio_dev *indio_dev;
+ 	struct device *dev = &pdev->dev;
++	irqreturn_t (*handler)(int irq, void *p) = NULL;
+ 	struct stm32_adc *adc;
+ 	int ret;
+ 
+@@ -1911,9 +1934,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (!adc->dma_chan)
++		handler = &stm32_adc_trigger_handler;
++
+ 	ret = iio_triggered_buffer_setup(indio_dev,
+-					 &iio_pollfunc_store_time,
+-					 &stm32_adc_trigger_handler,
++					 &iio_pollfunc_store_time, handler,
+ 					 &stm32_adc_buffer_setup_ops);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "buffer setup failed\n");
+diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c
+index 9a460807d46d..abe4b56c847c 100644
+--- a/drivers/iio/adc/ti-ads8344.c
++++ b/drivers/iio/adc/ti-ads8344.c
+@@ -29,7 +29,7 @@ struct ads8344 {
+ 	struct mutex lock;
+ 
+ 	u8 tx_buf ____cacheline_aligned;
+-	u16 rx_buf;
++	u8 rx_buf[3];
+ };
+ 
+ #define ADS8344_VOLTAGE_CHANNEL(chan, si)				\
+@@ -89,11 +89,11 @@ static int ads8344_adc_conversion(struct ads8344 *adc, int channel,
+ 
+ 	udelay(9);
+ 
+-	ret = spi_read(spi, &adc->rx_buf, 2);
++	ret = spi_read(spi, adc->rx_buf, sizeof(adc->rx_buf));
+ 	if (ret)
+ 		return ret;
+ 
+-	return adc->rx_buf;
++	return adc->rx_buf[0] << 9 | adc->rx_buf[1] << 1 | adc->rx_buf[2] >> 7;
+ }
+ 
+ static int ads8344_read_raw(struct iio_dev *iio,
+diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
+index ec227b358cd6..6fd06e4eff73 100644
+--- a/drivers/iio/adc/xilinx-xadc-core.c
++++ b/drivers/iio/adc/xilinx-xadc-core.c
+@@ -102,6 +102,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
+ 
+ #define XADC_FLAGS_BUFFERED BIT(0)
+ 
++/*
++ * The XADC hardware supports a samplerate of up to 1MSPS. Unfortunately it does
++ * not have a hardware FIFO. Which means an interrupt is generated for each
++ * conversion sequence. At 1MSPS sample rate the CPU in ZYNQ7000 is completely
++ * overloaded by the interrupts that it soft-lockups. For this reason the driver
++ * limits the maximum samplerate 150kSPS. At this rate the CPU is fairly busy,
++ * but still responsive.
++ */
++#define XADC_MAX_SAMPLERATE 150000
++
+ static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
+ 	uint32_t val)
+ {
+@@ -674,7 +684,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
+ 
+ 	spin_lock_irqsave(&xadc->lock, flags);
+ 	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
+-	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
++	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS);
+ 	if (state)
+ 		val |= XADC_AXI_INT_EOS;
+ 	else
+@@ -722,13 +732,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
+ {
+ 	uint16_t val;
+ 
++	/* Powerdown the ADC-B when it is not needed. */
+ 	switch (seq_mode) {
+ 	case XADC_CONF1_SEQ_SIMULTANEOUS:
+ 	case XADC_CONF1_SEQ_INDEPENDENT:
+-		val = XADC_CONF2_PD_ADC_B;
++		val = 0;
+ 		break;
+ 	default:
+-		val = 0;
++		val = XADC_CONF2_PD_ADC_B;
+ 		break;
+ 	}
+ 
+@@ -797,6 +808,16 @@ static int xadc_preenable(struct iio_dev *indio_dev)
+ 	if (ret)
+ 		goto err;
+ 
++	/*
++	 * In simultaneous mode the upper and lower aux channels are samples at
++	 * the same time. In this mode the upper 8 bits in the sequencer
++	 * register are don't care and the lower 8 bits control two channels
++	 * each. As such we must set the bit if either the channel in the lower
++	 * group or the upper group is enabled.
++	 */
++	if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS)
++		scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000;
++
+ 	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
+ 	if (ret)
+ 		goto err;
+@@ -823,11 +844,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = {
+ 	.postdisable = &xadc_postdisable,
+ };
+ 
++static int xadc_read_samplerate(struct xadc *xadc)
++{
++	unsigned int div;
++	uint16_t val16;
++	int ret;
++
++	ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
++	if (ret)
++		return ret;
++
++	div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
++	if (div < 2)
++		div = 2;
++
++	return xadc_get_dclk_rate(xadc) / div / 26;
++}
++
+ static int xadc_read_raw(struct iio_dev *indio_dev,
+ 	struct iio_chan_spec const *chan, int *val, int *val2, long info)
+ {
+ 	struct xadc *xadc = iio_priv(indio_dev);
+-	unsigned int div;
+ 	uint16_t val16;
+ 	int ret;
+ 
+@@ -880,41 +917,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
+ 		*val = -((273150 << 12) / 503975);
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_SAMP_FREQ:
+-		ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
+-		if (ret)
++		ret = xadc_read_samplerate(xadc);
++		if (ret < 0)
+ 			return ret;
+ 
+-		div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
+-		if (div < 2)
+-			div = 2;
+-
+-		*val = xadc_get_dclk_rate(xadc) / div / 26;
+-
++		*val = ret;
+ 		return IIO_VAL_INT;
+ 	default:
+ 		return -EINVAL;
+ 	}
+ }
+ 
+-static int xadc_write_raw(struct iio_dev *indio_dev,
+-	struct iio_chan_spec const *chan, int val, int val2, long info)
++static int xadc_write_samplerate(struct xadc *xadc, int val)
+ {
+-	struct xadc *xadc = iio_priv(indio_dev);
+ 	unsigned long clk_rate = xadc_get_dclk_rate(xadc);
+ 	unsigned int div;
+ 
+ 	if (!clk_rate)
+ 		return -EINVAL;
+ 
+-	if (info != IIO_CHAN_INFO_SAMP_FREQ)
+-		return -EINVAL;
+-
+ 	if (val <= 0)
+ 		return -EINVAL;
+ 
+ 	/* Max. 150 kSPS */
+-	if (val > 150000)
+-		val = 150000;
++	if (val > XADC_MAX_SAMPLERATE)
++		val = XADC_MAX_SAMPLERATE;
+ 
+ 	val *= 26;
+ 
+@@ -927,7 +954,7 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
+ 	 * limit.
+ 	 */
+ 	div = clk_rate / val;
+-	if (clk_rate / div / 26 > 150000)
++	if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
+ 		div++;
+ 	if (div < 2)
+ 		div = 2;
+@@ -938,6 +965,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
+ 		div << XADC_CONF2_DIV_OFFSET);
+ }
+ 
++static int xadc_write_raw(struct iio_dev *indio_dev,
++	struct iio_chan_spec const *chan, int val, int val2, long info)
++{
++	struct xadc *xadc = iio_priv(indio_dev);
++
++	if (info != IIO_CHAN_INFO_SAMP_FREQ)
++		return -EINVAL;
++
++	return xadc_write_samplerate(xadc, val);
++}
++
+ static const struct iio_event_spec xadc_temp_events[] = {
+ 	{
+ 		.type = IIO_EV_TYPE_THRESH,
+@@ -1223,6 +1261,21 @@ static int xadc_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_free_samplerate_trigger;
+ 
++	/*
++	 * Make sure not to exceed the maximum samplerate since otherwise the
++	 * resulting interrupt storm will soft-lock the system.
++	 */
++	if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
++		ret = xadc_read_samplerate(xadc);
++		if (ret < 0)
++			goto err_free_samplerate_trigger;
++		if (ret > XADC_MAX_SAMPLERATE) {
++			ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
++			if (ret < 0)
++				goto err_free_samplerate_trigger;
++		}
++	}
++
+ 	ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
+ 			dev_name(&pdev->dev), indio_dev);
+ 	if (ret)
+diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
+index 0e35ff06f9af..13bdfbbf5f71 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_core.c
++++ b/drivers/iio/common/st_sensors/st_sensors_core.c
+@@ -79,7 +79,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
+ 	struct st_sensor_odr_avl odr_out = {0, 0};
+ 	struct st_sensor_data *sdata = iio_priv(indio_dev);
+ 
+-	if (!sdata->sensor_settings->odr.addr)
++	if (!sdata->sensor_settings->odr.mask)
+ 		return 0;
+ 
+ 	err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+index 84d219ae6aee..4426524b59f2 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+@@ -2036,11 +2036,21 @@ static int st_lsm6dsx_init_hw_timer(struct st_lsm6dsx_hw *hw)
+ 	return 0;
+ }
+ 
+-static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
++static int st_lsm6dsx_reset_device(struct st_lsm6dsx_hw *hw)
+ {
+ 	const struct st_lsm6dsx_reg *reg;
+ 	int err;
+ 
++	/*
++	 * flush hw FIFO before device reset in order to avoid
++	 * possible races on interrupt line 1. If the first interrupt
++	 * line is asserted during hw reset the device will work in
++	 * I3C-only mode (if it is supported)
++	 */
++	err = st_lsm6dsx_flush_fifo(hw);
++	if (err < 0 && err != -ENOTSUPP)
++		return err;
++
+ 	/* device sw reset */
+ 	reg = &hw->settings->reset;
+ 	err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+@@ -2059,6 +2069,18 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
+ 
+ 	msleep(50);
+ 
++	return 0;
++}
++
++static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
++{
++	const struct st_lsm6dsx_reg *reg;
++	int err;
++
++	err = st_lsm6dsx_reset_device(hw);
++	if (err < 0)
++		return err;
++
+ 	/* enable Block Data Update */
+ 	reg = &hw->settings->bdu;
+ 	err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 90ee4484a80a..2eb7b2968e5d 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -212,11 +212,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	}
+ 	hw = to_me_hw(dev);
+ 	hw->mem_addr = pcim_iomap_table(pdev)[0];
+-	hw->irq = pdev->irq;
+ 	hw->read_fws = mei_me_read_fws;
+ 
+ 	pci_enable_msi(pdev);
+ 
++	hw->irq = pdev->irq;
++
+ 	 /* request and enable interrupt */
+ 	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
+ 
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 1a69286daa8d..d93de7096ae0 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1454,6 +1454,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
+ 		reg |= ARLTBL_RW;
+ 	else
+ 		reg &= ~ARLTBL_RW;
++	if (dev->vlan_enabled)
++		reg &= ~ARLTBL_IVL_SVL_SELECT;
++	else
++		reg |= ARLTBL_IVL_SVL_SELECT;
+ 	b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
+ 
+ 	return b53_arl_op_wait(dev);
+@@ -1463,6 +1467,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
+ 			u16 vid, struct b53_arl_entry *ent, u8 *idx,
+ 			bool is_valid)
+ {
++	DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
+ 	unsigned int i;
+ 	int ret;
+ 
+@@ -1470,6 +1475,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
+ 	if (ret)
+ 		return ret;
+ 
++	bitmap_zero(free_bins, dev->num_arl_entries);
++
+ 	/* Read the bins */
+ 	for (i = 0; i < dev->num_arl_entries; i++) {
+ 		u64 mac_vid;
+@@ -1481,13 +1488,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
+ 			   B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
+ 		b53_arl_to_entry(ent, mac_vid, fwd_entry);
+ 
+-		if (!(fwd_entry & ARLTBL_VALID))
++		if (!(fwd_entry & ARLTBL_VALID)) {
++			set_bit(i, free_bins);
+ 			continue;
++		}
+ 		if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ 			continue;
++		if (dev->vlan_enabled &&
++		    ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
++			continue;
+ 		*idx = i;
++		return 0;
+ 	}
+ 
++	if (bitmap_weight(free_bins, dev->num_arl_entries) == 0)
++		return -ENOSPC;
++
++	*idx = find_first_bit(free_bins, dev->num_arl_entries);
++
+ 	return -ENOENT;
+ }
+ 
+@@ -1517,10 +1535,21 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
+ 	if (op)
+ 		return ret;
+ 
+-	/* We could not find a matching MAC, so reset to a new entry */
+-	if (ret) {
++	switch (ret) {
++	case -ENOSPC:
++		dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
++			addr, vid);
++		return is_valid ? ret : 0;
++	case -ENOENT:
++		/* We could not find a matching MAC, so reset to a new entry */
++		dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
++			addr, vid, idx);
+ 		fwd_entry = 0;
+-		idx = 1;
++		break;
++	default:
++		dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
++			addr, vid, idx);
++		break;
+ 	}
+ 
+ 	/* For multicast address, the port is a bitmask and the validity
+@@ -1538,7 +1567,6 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
+ 		ent.is_valid = !!(ent.port);
+ 	}
+ 
+-	ent.is_valid = is_valid;
+ 	ent.vid = vid;
+ 	ent.is_static = true;
+ 	ent.is_age = false;
+diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
+index 2a9f421680aa..c90985c294a2 100644
+--- a/drivers/net/dsa/b53/b53_regs.h
++++ b/drivers/net/dsa/b53/b53_regs.h
+@@ -292,6 +292,7 @@
+ /* ARL Table Read/Write Register (8 bit) */
+ #define B53_ARLTBL_RW_CTRL		0x00
+ #define    ARLTBL_RW			BIT(0)
++#define    ARLTBL_IVL_SVL_SELECT	BIT(6)
+ #define    ARLTBL_START_DONE		BIT(7)
+ 
+ /* MAC Address Index Register (48 bit) */
+@@ -304,7 +305,7 @@
+  *
+  * BCM5325 and BCM5365 share most definitions below
+  */
+-#define B53_ARLTBL_MAC_VID_ENTRY(n)	(0x10 * (n))
++#define B53_ARLTBL_MAC_VID_ENTRY(n)	((0x10 * (n)) + 0x10)
+ #define   ARLTBL_MAC_MASK		0xffffffffffffULL
+ #define   ARLTBL_VID_S			48
+ #define   ARLTBL_VID_MASK_25		0xff
+@@ -316,13 +317,16 @@
+ #define   ARLTBL_VALID_25		BIT(63)
+ 
+ /* ARL Table Data Entry N Registers (32 bit) */
+-#define B53_ARLTBL_DATA_ENTRY(n)	((0x10 * (n)) + 0x08)
++#define B53_ARLTBL_DATA_ENTRY(n)	((0x10 * (n)) + 0x18)
+ #define   ARLTBL_DATA_PORT_ID_MASK	0x1ff
+ #define   ARLTBL_TC(tc)			((3 & tc) << 11)
+ #define   ARLTBL_AGE			BIT(14)
+ #define   ARLTBL_STATIC			BIT(15)
+ #define   ARLTBL_VALID			BIT(16)
+ 
++/* Maximum number of bin entries in the ARL for all switches */
++#define B53_ARLTBL_MAX_BIN_ENTRIES	4
++
+ /* ARL Search Control Register (8 bit) */
+ #define B53_ARL_SRCH_CTL		0x50
+ #define B53_ARL_SRCH_CTL_25		0x20
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 1d678bee2cc9..b7c0c20e1325 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -938,6 +938,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
+ 	if (netif_running(dev))
+ 		bcmgenet_update_mib_counters(priv);
+ 
++	dev->netdev_ops->ndo_get_stats(dev);
++
+ 	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ 		const struct bcmgenet_stats *s;
+ 		char *p;
+@@ -3142,6 +3144,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+ 	dev->stats.rx_packets = rx_packets;
+ 	dev->stats.rx_errors = rx_errors;
+ 	dev->stats.rx_missed_errors = rx_errors;
++	dev->stats.rx_dropped = rx_dropped;
+ 	return &dev->stats;
+ }
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+index 19c11568113a..7b9cd69f9844 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+@@ -1049,9 +1049,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
+ 	}
+ }
+ 
+-static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+-					   struct cudbg_error *cudbg_err,
+-					   u8 mem_type)
++static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
++				 struct cudbg_error *cudbg_err,
++				 u8 mem_type, unsigned long *region_size)
+ {
+ 	struct adapter *padap = pdbg_init->adap;
+ 	struct cudbg_meminfo mem_info;
+@@ -1060,15 +1060,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+ 
+ 	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
+ 	rc = cudbg_fill_meminfo(padap, &mem_info);
+-	if (rc)
++	if (rc) {
++		cudbg_err->sys_err = rc;
+ 		return rc;
++	}
+ 
+ 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
+ 	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
+-	if (rc)
++	if (rc) {
++		cudbg_err->sys_err = rc;
+ 		return rc;
++	}
++
++	if (region_size)
++		*region_size = mem_info.avail[mc_idx].limit -
++			       mem_info.avail[mc_idx].base;
+ 
+-	return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
++	return 0;
+ }
+ 
+ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+@@ -1076,7 +1084,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+ 				    struct cudbg_error *cudbg_err,
+ 				    u8 mem_type)
+ {
+-	unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
++	unsigned long size = 0;
++	int rc;
++
++	rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
++	if (rc)
++		return rc;
+ 
+ 	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+ 				 cudbg_err);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+index af1f40cbccc8..f5bc996ac77d 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+@@ -311,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+  */
+ static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+ {
+-	struct adapter *adapter = (struct adapter *)container_of(ptp,
+-				   struct adapter, ptp_clock_info);
+-	struct fw_ptp_cmd c;
++	struct adapter *adapter = container_of(ptp, struct adapter,
++					       ptp_clock_info);
+ 	u64 ns;
+-	int err;
+-
+-	memset(&c, 0, sizeof(c));
+-	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+-				     FW_CMD_REQUEST_F |
+-				     FW_CMD_READ_F |
+-				     FW_PTP_CMD_PORTID_V(0));
+-	c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+-	c.u.ts.sc = FW_PTP_SC_GET_TIME;
+ 
+-	err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c);
+-	if (err < 0) {
+-		dev_err(adapter->pdev_dev,
+-			"PTP: %s error %d\n", __func__, -err);
+-		return err;
+-	}
++	ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A));
++	ns |= (u64)t4_read_reg(adapter,
++			       T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32;
+ 
+ 	/* convert to timespec*/
+-	ns = be64_to_cpu(c.u.ts.tm);
+ 	*ts = ns_to_timespec64(ns);
+-
+-	return err;
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+index a957a6e4d4c4..b0519c326692 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+@@ -1900,6 +1900,9 @@
+ 
+ #define MAC_PORT_CFG2_A 0x818
+ 
++#define MAC_PORT_PTP_SUM_LO_A 0x990
++#define MAC_PORT_PTP_SUM_HI_A 0x994
++
+ #define MPS_CMN_CTL_A	0x9000
+ 
+ #define COUNTPAUSEMCRX_S    5
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 4d5ca302c067..a30edb436f4a 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -43,6 +43,7 @@
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
+ #include <linux/moduleparam.h>
++#include <linux/indirect_call_wrapper.h>
+ 
+ #include "mlx4_en.h"
+ 
+@@ -261,6 +262,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+ 	}
+ }
+ 
++INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
++						   struct mlx4_en_tx_ring *ring,
++						   int index, u64 timestamp,
++						   int napi_mode));
+ 
+ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ 			 struct mlx4_en_tx_ring *ring,
+@@ -329,6 +334,11 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ 	return tx_info->nr_txbb;
+ }
+ 
++INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
++						      struct mlx4_en_tx_ring *ring,
++						      int index, u64 timestamp,
++						      int napi_mode));
++
+ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ 			    struct mlx4_en_tx_ring *ring,
+ 			    int index, u64 timestamp,
+@@ -449,7 +459,9 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
+ 				timestamp = mlx4_en_get_cqe_ts(cqe);
+ 
+ 			/* free next descriptor */
+-			last_nr_txbb = ring->free_tx_desc(
++			last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc,
++						       mlx4_en_free_tx_desc,
++						       mlx4_en_recycle_tx_desc,
+ 					priv, ring, ring_index,
+ 					timestamp, napi_budget);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+index c51b2adfc1e1..2cbfa5cfefab 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+@@ -316,7 +316,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
+ 
+ 	block = kzalloc(sizeof(*block), GFP_KERNEL);
+ 	if (!block)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 	INIT_LIST_HEAD(&block->resource_list);
+ 	block->afa = mlxsw_afa;
+ 
+@@ -344,7 +344,7 @@ err_second_set_create:
+ 	mlxsw_afa_set_destroy(block->first_set);
+ err_first_set_create:
+ 	kfree(block);
+-	return NULL;
++	return ERR_PTR(-ENOMEM);
+ }
+ EXPORT_SYMBOL(mlxsw_afa_block_create);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+index 6c66a0f1b79e..ad69913f19c1 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+@@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ 	 * to be written using PEFA register to all indexes for all regions.
+ 	 */
+ 	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+-	if (!afa_block) {
+-		err = -ENOMEM;
++	if (IS_ERR(afa_block)) {
++		err = PTR_ERR(afa_block);
+ 		goto err_afa_block;
+ 	}
+ 	err = mlxsw_afa_block_continue(afa_block);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+index 3d3cca596116..d77cdcb5c642 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+@@ -444,7 +444,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
+ 
+ 	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
+ 	if (!rulei)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	if (afa_block) {
+ 		rulei->act_block = afa_block;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+index 346f4a5fe053..221aa6a474eb 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+@@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
+ 	int err;
+ 
+ 	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+-	if (!afa_block)
+-		return ERR_PTR(-ENOMEM);
++	if (IS_ERR(afa_block))
++		return afa_block;
+ 
+ 	err = mlxsw_afa_block_append_allocated_counter(afa_block,
+ 						       counter_index);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+index 0e2fa14f1423..a3934ca6a043 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+@@ -119,6 +119,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
+ 		{ .div = 5, .val = 5, },
+ 		{ .div = 6, .val = 6, },
+ 		{ .div = 7, .val = 7, },
++		{ /* end of array */ }
+ 	};
+ 
+ 	clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
+diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
+index 269596c15133..2e5202923510 100644
+--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
++++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
+@@ -1387,6 +1387,8 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	regs_phys = res->start;
+ 	port->regs = devm_ioremap_resource(dev, res);
++	if (IS_ERR(port->regs))
++		return PTR_ERR(port->regs);
+ 
+ 	switch (port->id) {
+ 	case IXP4XX_ETH_NPEA:
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 09f279c0182b..6b461be1820b 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1207,7 +1207,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
+ 		enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]);
+ 
+ 		if (df < 0 || df > GENEVE_DF_MAX) {
+-			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF],
++			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_DF],
+ 					    "Invalid DF attribute");
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 9b4ae5c36da6..35aa7b0a2aeb 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3658,11 +3658,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 			  struct netlink_ext_ack *extack)
+ {
+ 	struct macsec_dev *macsec = macsec_priv(dev);
++	rx_handler_func_t *rx_handler;
++	u8 icv_len = DEFAULT_ICV_LEN;
+ 	struct net_device *real_dev;
+-	int err;
++	int err, mtu;
+ 	sci_t sci;
+-	u8 icv_len = DEFAULT_ICV_LEN;
+-	rx_handler_func_t *rx_handler;
+ 
+ 	if (!tb[IFLA_LINK])
+ 		return -EINVAL;
+@@ -3681,7 +3681,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 
+ 	if (data && data[IFLA_MACSEC_ICV_LEN])
+ 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
+-	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
++	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
++	if (mtu < 0)
++		dev->mtu = 0;
++	else
++		dev->mtu = mtu;
+ 
+ 	rx_handler = rtnl_dereference(real_dev->rx_handler);
+ 	if (rx_handler && rx_handler != macsec_handle_frame)
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index e7289d67268f..0482adc9916b 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -1704,7 +1704,7 @@ static int macvlan_device_event(struct notifier_block *unused,
+ 						struct macvlan_dev,
+ 						list);
+ 
+-		if (macvlan_sync_address(vlan->dev, dev->dev_addr))
++		if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr))
+ 			return NOTIFY_BAD;
+ 
+ 		break;
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 4004f98e50d9..04845a4017f9 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -468,6 +468,9 @@ static const struct team_mode *team_mode_get(const char *kind)
+ 	struct team_mode_item *mitem;
+ 	const struct team_mode *mode = NULL;
+ 
++	if (!try_module_get(THIS_MODULE))
++		return NULL;
++
+ 	spin_lock(&mode_list_lock);
+ 	mitem = __find_mode(kind);
+ 	if (!mitem) {
+@@ -483,6 +486,7 @@ static const struct team_mode *team_mode_get(const char *kind)
+ 	}
+ 
+ 	spin_unlock(&mode_list_lock);
++	module_put(THIS_MODULE);
+ 	return mode;
+ }
+ 
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index b8228f50bc94..6716deeb35e3 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -188,8 +188,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
+ 	fl6.flowi6_proto = iph->nexthdr;
+ 	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+ 
+-	dst = ip6_route_output(net, NULL, &fl6);
+-	if (dst == dst_null)
++	dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
++	if (IS_ERR(dst) || dst == dst_null)
+ 		goto err;
+ 
+ 	skb_dst_drop(skb);
+@@ -474,7 +474,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+ 	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
+ 		return skb;
+ 
+-	if (qdisc_tx_is_default(vrf_dev))
++	if (qdisc_tx_is_default(vrf_dev) ||
++	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
+ 		return vrf_ip6_out_direct(vrf_dev, sk, skb);
+ 
+ 	return vrf_ip6_out_redirect(vrf_dev, skb);
+@@ -686,7 +687,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
+ 	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
+ 		return skb;
+ 
+-	if (qdisc_tx_is_default(vrf_dev))
++	if (qdisc_tx_is_default(vrf_dev) ||
++	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
+ 		return vrf_ip_out_direct(vrf_dev, sk, skb);
+ 
+ 	return vrf_ip_out_redirect(vrf_dev, skb);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 45308b3350cf..a5b415fed11e 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3144,7 +3144,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
+ 		u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+ 
+ 		if (id >= VXLAN_N_VID) {
+-			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID],
++			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
+ 					    "VXLAN ID must be lower than 16777216");
+ 			return -ERANGE;
+ 		}
+@@ -3155,7 +3155,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
+ 			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
+ 
+ 		if (ntohs(p->high) < ntohs(p->low)) {
+-			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE],
++			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE],
+ 					    "Invalid source port range");
+ 			return -EINVAL;
+ 		}
+@@ -3165,7 +3165,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
+ 		enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
+ 
+ 		if (df < 0 || df > VXLAN_DF_MAX) {
+-			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF],
++			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF],
+ 					    "Invalid DF attribute");
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+index 6209f85a71dd..0af9e997c9f6 100644
+--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
++++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+@@ -374,7 +374,7 @@ out:
+ }
+ 
+ static void *
+-il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
++il3945_rs_alloc(struct ieee80211_hw *hw)
+ {
+ 	return hw->priv;
+ }
+diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+index 7c6e2c863497..0a02d8aca320 100644
+--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
++++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+@@ -2474,7 +2474,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+ }
+ 
+ static void *
+-il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
++il4965_rs_alloc(struct ieee80211_hw *hw)
+ {
+ 	return hw->priv;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+index 226165db7dfd..dac809df7f1d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+@@ -3019,7 +3019,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
+ 			cpu_to_le16(priv->lib->bt_params->agg_time_limit);
+ }
+ 
+-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
++static void *rs_alloc(struct ieee80211_hw *hw)
+ {
+ 	return hw->priv;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index ba2aff3af0fe..e3a33388be70 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -296,9 +296,14 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ 		if (!prof->enabled) {
+ 			IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ 					profs[i]);
+-			/* if one of the profiles is disabled, we fail all */
+-			return -ENOENT;
++			/*
++			 * if one of the profiles is disabled, we
++			 * ignore all of them and return 1 to
++			 * differentiate disabled from other failures.
++			 */
++			return 1;
+ 		}
++
+ 		IWL_DEBUG_INFO(fwrt,
+ 			       "SAR EWRD: chain %d profile index %d\n",
+ 			       i, profs[i]);
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+index 73196cbc7fbe..75d958bab0e3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+@@ -8,7 +8,7 @@
+  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2019 Intel Corporation
++ * Copyright(c) 2019 - 2020 Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -31,7 +31,7 @@
+  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2019 Intel Corporation
++ * Copyright(c) 2019 - 2020 Intel Corporation
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -99,7 +99,7 @@ enum iwl_mvm_dqa_txq {
+ 	IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+ 	IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
+ 	IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+-	IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
++	IWL_MVM_DQA_MAX_DATA_QUEUE = 30,
+ };
+ 
+ enum iwl_mvm_tx_fifo {
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+index bab0999f002c..252c2ca1b0ed 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+@@ -532,8 +532,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
+ 					IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ 					IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ 				.mac_cap_info[2] =
+-					IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
+-					IEEE80211_HE_MAC_CAP2_ACK_EN,
++					IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
+ 				.mac_cap_info[3] =
+ 					IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ 					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
+@@ -617,8 +616,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
+ 					IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ 					IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ 				.mac_cap_info[2] =
+-					IEEE80211_HE_MAC_CAP2_BSR |
+-					IEEE80211_HE_MAC_CAP2_ACK_EN,
++					IEEE80211_HE_MAC_CAP2_BSR,
+ 				.mac_cap_info[3] =
+ 					IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ 					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 98263cd37944..a8ee79441848 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -698,6 +698,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+ 		struct iwl_dev_tx_power_cmd_v4 v4;
+ 	} cmd;
+ 
++	int ret;
+ 	u16 len = 0;
+ 
+ 	cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
+@@ -712,9 +713,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+ 		len = sizeof(cmd.v4.v3);
+ 
+ 
+-	if (iwl_sar_select_profile(&mvm->fwrt, cmd.v5.v3.per_chain_restriction,
+-				   prof_a, prof_b))
+-		return -ENOENT;
++	ret = iwl_sar_select_profile(&mvm->fwrt,
++				     cmd.v5.v3.per_chain_restriction,
++				     prof_a, prof_b);
++
++	/* return on error or if the profile is disabled (positive number) */
++	if (ret)
++		return ret;
++
+ 	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+ 	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ }
+@@ -1005,16 +1011,7 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+ 				"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+ 				ret);
+ 
+-	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
+-	/*
+-	 * If we don't have profile 0 from BIOS, just skip it.  This
+-	 * means that SAR Geo will not be enabled either, even if we
+-	 * have other valid profiles.
+-	 */
+-	if (ret == -ENOENT)
+-		return 1;
+-
+-	return ret;
++	return iwl_mvm_sar_select_profile(mvm, 1, 1);
+ }
+ 
+ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
+@@ -1236,7 +1233,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
+ 	ret = iwl_mvm_sar_init(mvm);
+ 	if (ret == 0) {
+ 		ret = iwl_mvm_sar_geo_init(mvm);
+-	} else if (ret > 0 && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
++	} else if (ret == -ENOENT && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
+ 		/*
+ 		 * If basic SAR is not available, we check for WGDS,
+ 		 * which should *not* be available either.  If it is
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+index 1a990ed9c3ca..08bef33a1d7e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+@@ -3665,7 +3665,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+ 			cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
+ }
+ 
+-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
++static void *rs_alloc(struct ieee80211_hw *hw)
+ {
+ 	return hw->priv;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+index 5ee33c8ae9d2..77b8def26edb 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+@@ -8,7 +8,7 @@
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2018 - 2020 Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -31,7 +31,7 @@
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2018 - 2020 Intel Corporation
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -566,6 +566,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ 
+ struct iwl_mvm_stat_data {
+ 	struct iwl_mvm *mvm;
++	__le32 flags;
+ 	__le32 mac_id;
+ 	u8 beacon_filter_average_energy;
+ 	void *general;
+@@ -606,6 +607,13 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
+ 			-general->beacon_average_energy[vif_id];
+ 	}
+ 
++	/* make sure that beacon statistics don't go backwards with TCM
++	 * request to clear statistics
++	 */
++	if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
++		mvmvif->beacon_stats.accu_num_beacons +=
++			mvmvif->beacon_stats.num_beacons;
++
+ 	if (mvmvif->id != id)
+ 		return;
+ 
+@@ -763,6 +771,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+ 
+ 		flags = stats->flag;
+ 	}
++	data.flags = flags;
+ 
+ 	iwl_mvm_rx_stats_check_trigger(mvm, pkt);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 64ef3f3ba23b..56ae72debb96 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -722,6 +722,11 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
++	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
++		 "max queue %d >= num_of_queues (%d)", maxq,
++		 mvm->trans->trans_cfg->base_params->num_of_queues))
++		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
++
+ 	/* This should not be hit with new TX path */
+ 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ 		return -ENOSPC;
+@@ -1164,9 +1169,9 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
+ 						   inactive_tid_bitmap,
+ 						   &unshare_queues,
+ 						   &changetid_queues);
+-		if (ret >= 0 && free_queue < 0) {
++		if (ret && free_queue < 0) {
+ 			queue_owner = sta;
+-			free_queue = ret;
++			free_queue = i;
+ 		}
+ 		/* only unlock sta lock - we still need the queue info lock */
+ 		spin_unlock_bh(&mvmsta->lock);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index 01f248ba8fec..9d5b1e51b50d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -129,6 +129,18 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ 	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ 			      trans->cfg->min_txq_size);
+ 
++	switch (trans_pcie->rx_buf_size) {
++	case IWL_AMSDU_DEF:
++		return -EINVAL;
++	case IWL_AMSDU_2K:
++		break;
++	case IWL_AMSDU_4K:
++	case IWL_AMSDU_8K:
++	case IWL_AMSDU_12K:
++		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
++		break;
++	}
++
+ 	/* Allocate prph scratch */
+ 	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
+ 					  &trans_pcie->prph_scratch_dma_addr,
+@@ -143,10 +155,8 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ 		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ 	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
+ 
+-	control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
+-			IWL_PRPH_SCRATCH_MTR_MODE |
+-			(IWL_PRPH_MTR_FORMAT_256B &
+-			 IWL_PRPH_SCRATCH_MTR_FORMAT);
++	control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
++	control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
+ 
+ 	/* initialize RX default queue */
+ 	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+index 86fc00167817..9664dbc70ef1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+@@ -1418,6 +1418,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
+ 
+ 	iwl_pcie_gen2_txq_unmap(trans, queue);
+ 
++	iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
++	trans_pcie->txq[queue] = NULL;
++
+ 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
+index 0c7d74902d33..4b5ea0ec9109 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
+@@ -261,7 +261,7 @@ static void rtl_rate_update(void *ppriv,
+ {
+ }
+ 
+-static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
++static void *rtl_rate_alloc(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	return rtlpriv;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index a4d8c90ee7cc..652ca87dac94 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -6,6 +6,7 @@
+ 
+ #include <linux/blkdev.h>
+ #include <linux/blk-mq.h>
++#include <linux/compat.h>
+ #include <linux/delay.h>
+ #include <linux/errno.h>
+ #include <linux/hdreg.h>
+@@ -1248,6 +1249,18 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
+ 	queue_work(nvme_wq, &ctrl->async_event_work);
+ }
+ 
++/*
++ * Convert integer values from ioctl structures to user pointers, silently
++ * ignoring the upper bits in the compat case to match behaviour of 32-bit
++ * kernels.
++ */
++static void __user *nvme_to_user_ptr(uintptr_t ptrval)
++{
++	if (in_compat_syscall())
++		ptrval = (compat_uptr_t)ptrval;
++	return (void __user *)ptrval;
++}
++
+ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+ {
+ 	struct nvme_user_io io;
+@@ -1271,7 +1284,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+ 
+ 	length = (io.nblocks + 1) << ns->lba_shift;
+ 	meta_len = (io.nblocks + 1) * ns->ms;
+-	metadata = (void __user *)(uintptr_t)io.metadata;
++	metadata = nvme_to_user_ptr(io.metadata);
+ 
+ 	if (ns->ext) {
+ 		length += meta_len;
+@@ -1294,7 +1307,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+ 	c.rw.appmask = cpu_to_le16(io.appmask);
+ 
+ 	return nvme_submit_user_cmd(ns->queue, &c,
+-			(void __user *)(uintptr_t)io.addr, length,
++			nvme_to_user_ptr(io.addr), length,
+ 			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
+ }
+ 
+@@ -1414,9 +1427,9 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ 
+ 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
+ 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+-			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
+-			(void __user *)(uintptr_t)cmd.metadata,
+-			cmd.metadata_len, 0, &result, timeout);
++			nvme_to_user_ptr(cmd.addr), cmd.data_len,
++			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
++			0, &result, timeout);
+ 	nvme_passthru_end(ctrl, effects);
+ 
+ 	if (status >= 0) {
+@@ -1461,8 +1474,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ 
+ 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
+ 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+-			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
+-			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
++			nvme_to_user_ptr(cmd.addr), cmd.data_len,
++			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
+ 			0, &cmd.result, timeout);
+ 	nvme_passthru_end(ctrl, effects);
+ 
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index a11900cf3a36..906dc0faa48e 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -514,7 +514,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+ 	if (!nr_nsids)
+ 		return 0;
+ 
+-	down_write(&ctrl->namespaces_rwsem);
++	down_read(&ctrl->namespaces_rwsem);
+ 	list_for_each_entry(ns, &ctrl->namespaces, list) {
+ 		unsigned nsid = le32_to_cpu(desc->nsids[n]);
+ 
+@@ -525,7 +525,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+ 		if (++n == nr_nsids)
+ 			break;
+ 	}
+-	up_write(&ctrl->namespaces_rwsem);
++	up_read(&ctrl->namespaces_rwsem);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 49d4373b84eb..00e6aa59954d 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -164,16 +164,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
+ static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
+ {
+ 	struct request *rq;
+-	unsigned int bytes;
+ 
+ 	if (unlikely(nvme_tcp_async_req(req)))
+ 		return false; /* async events don't have a request */
+ 
+ 	rq = blk_mq_rq_from_pdu(req);
+-	bytes = blk_rq_payload_bytes(rq);
+ 
+-	return rq_data_dir(rq) == WRITE && bytes &&
+-		bytes <= nvme_tcp_inline_data_size(req->queue);
++	return rq_data_dir(rq) == WRITE && req->data_len &&
++		req->data_len <= nvme_tcp_inline_data_size(req->queue);
+ }
+ 
+ static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
+@@ -2090,7 +2088,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
+ 
+ 	c->common.flags |= NVME_CMD_SGL_METABUF;
+ 
+-	if (rq_data_dir(rq) == WRITE && req->data_len &&
++	if (!blk_rq_nr_phys_segments(rq))
++		nvme_tcp_set_sg_null(c);
++	else if (rq_data_dir(rq) == WRITE &&
+ 	    req->data_len <= nvme_tcp_inline_data_size(queue))
+ 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
+ 	else
+@@ -2117,7 +2117,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
+ 	req->data_sent = 0;
+ 	req->pdu_len = 0;
+ 	req->pdu_sent = 0;
+-	req->data_len = blk_rq_payload_bytes(rq);
++	req->data_len = blk_rq_nr_phys_segments(rq) ?
++				blk_rq_payload_bytes(rq) : 0;
+ 	req->curr_bio = rq->bio;
+ 
+ 	if (rq_data_dir(rq) == WRITE &&
+diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
+index 91e24f01b54e..d78f86f8e462 100644
+--- a/drivers/pwm/pwm-bcm2835.c
++++ b/drivers/pwm/pwm-bcm2835.c
+@@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
+ 
+ 	pc->chip.dev = &pdev->dev;
+ 	pc->chip.ops = &bcm2835_pwm_ops;
++	pc->chip.base = -1;
+ 	pc->chip.npwm = 2;
+ 	pc->chip.of_xlate = of_pwm_xlate_with_flags;
+ 	pc->chip.of_pwm_n_cells = 3;
+diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
+index 35a7ac42269c..7e5ed0152977 100644
+--- a/drivers/pwm/pwm-imx27.c
++++ b/drivers/pwm/pwm-imx27.c
+@@ -289,7 +289,7 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	writel(cr, imx->mmio_base + MX3_PWMCR);
+ 
+-	if (!state->enabled && cstate.enabled)
++	if (!state->enabled)
+ 		pwm_imx27_clk_disable_unprepare(chip);
+ 
+ 	return 0;
+diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
+index 2685577b6dd4..7ab9eb6616d9 100644
+--- a/drivers/pwm/pwm-rcar.c
++++ b/drivers/pwm/pwm-rcar.c
+@@ -229,24 +229,28 @@ static int rcar_pwm_probe(struct platform_device *pdev)
+ 	rcar_pwm->chip.base = -1;
+ 	rcar_pwm->chip.npwm = 1;
+ 
++	pm_runtime_enable(&pdev->dev);
++
+ 	ret = pwmchip_add(&rcar_pwm->chip);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret);
++		pm_runtime_disable(&pdev->dev);
+ 		return ret;
+ 	}
+ 
+-	pm_runtime_enable(&pdev->dev);
+-
+ 	return 0;
+ }
+ 
+ static int rcar_pwm_remove(struct platform_device *pdev)
+ {
+ 	struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
++	int ret;
++
++	ret = pwmchip_remove(&rcar_pwm->chip);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 
+-	return pwmchip_remove(&rcar_pwm->chip);
++	return ret;
+ }
+ 
+ static const struct of_device_id rcar_pwm_of_table[] = {
+diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
+index 4a855a21b782..8032acc84161 100644
+--- a/drivers/pwm/pwm-renesas-tpu.c
++++ b/drivers/pwm/pwm-renesas-tpu.c
+@@ -415,16 +415,17 @@ static int tpu_probe(struct platform_device *pdev)
+ 	tpu->chip.base = -1;
+ 	tpu->chip.npwm = TPU_CHANNEL_MAX;
+ 
++	pm_runtime_enable(&pdev->dev);
++
+ 	ret = pwmchip_add(&tpu->chip);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to register PWM chip\n");
++		pm_runtime_disable(&pdev->dev);
+ 		return ret;
+ 	}
+ 
+ 	dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id);
+ 
+-	pm_runtime_enable(&pdev->dev);
+-
+ 	return 0;
+ }
+ 
+@@ -434,12 +435,10 @@ static int tpu_remove(struct platform_device *pdev)
+ 	int ret;
+ 
+ 	ret = pwmchip_remove(&tpu->chip);
+-	if (ret)
+-		return ret;
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ #ifdef CONFIG_OF
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index 0c6245fc7706..983f9c9e08de 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -849,8 +849,10 @@ static void io_subchannel_register(struct ccw_device *cdev)
+ 	 * Now we know this subchannel will stay, we can throw
+ 	 * our delayed uevent.
+ 	 */
+-	dev_set_uevent_suppress(&sch->dev, 0);
+-	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
++	if (dev_get_uevent_suppress(&sch->dev)) {
++		dev_set_uevent_suppress(&sch->dev, 0);
++		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
++	}
+ 	/* make it known to the system */
+ 	ret = ccw_device_add(cdev);
+ 	if (ret) {
+@@ -1058,8 +1060,11 @@ static int io_subchannel_probe(struct subchannel *sch)
+ 		 * Throw the delayed uevent for the subchannel, register
+ 		 * the ccw_device and exit.
+ 		 */
+-		dev_set_uevent_suppress(&sch->dev, 0);
+-		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
++		if (dev_get_uevent_suppress(&sch->dev)) {
++			/* should always be the case for the console */
++			dev_set_uevent_suppress(&sch->dev, 0);
++			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
++		}
+ 		cdev = sch_get_cdev(sch);
+ 		rc = ccw_device_add(cdev);
+ 		if (rc) {
+diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
+index e401a3d0aa57..339a6bc0339b 100644
+--- a/drivers/s390/cio/vfio_ccw_drv.c
++++ b/drivers/s390/cio/vfio_ccw_drv.c
+@@ -167,6 +167,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
+ 	if (ret)
+ 		goto out_disable;
+ 
++	if (dev_get_uevent_suppress(&sch->dev)) {
++		dev_set_uevent_suppress(&sch->dev, 0);
++		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
++	}
++
+ 	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
+ 			   sch->schid.cssid, sch->schid.ssid,
+ 			   sch->schid.sch_no);
+diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
+index da6e97d8dc3b..6bb8917b99a1 100644
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -1208,9 +1208,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
+ 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ 		if (!rjt)
+ 			FC_RPORT_DBG(rdata, "PRLI bad response\n");
+-		else
++		else {
+ 			FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+ 				     rjt->er_reason, rjt->er_explan);
++			if (rjt->er_reason == ELS_RJT_UNAB &&
++			    rjt->er_explan == ELS_EXPL_PLOGI_REQD) {
++				fc_rport_enter_plogi(rdata);
++				goto out;
++			}
++		}
+ 		fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
+ 	}
+ 
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index 3f2cb17c4574..828873d5b3e8 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -482,7 +482,7 @@ struct lpfc_vport {
+ 	struct dentry *debug_nvmestat;
+ 	struct dentry *debug_scsistat;
+ 	struct dentry *debug_nvmektime;
+-	struct dentry *debug_cpucheck;
++	struct dentry *debug_hdwqstat;
+ 	struct dentry *vport_debugfs_root;
+ 	struct lpfc_debugfs_trc *disc_trc;
+ 	atomic_t disc_trc_cnt;
+@@ -1176,12 +1176,11 @@ struct lpfc_hba {
+ 	uint16_t sfp_warning;
+ 
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+-	uint16_t cpucheck_on;
++	uint16_t hdwqstat_on;
+ #define LPFC_CHECK_OFF		0
+ #define LPFC_CHECK_NVME_IO	1
+-#define LPFC_CHECK_NVMET_RCV	2
+-#define LPFC_CHECK_NVMET_IO	4
+-#define LPFC_CHECK_SCSI_IO	8
++#define LPFC_CHECK_NVMET_IO	2
++#define LPFC_CHECK_SCSI_IO	4
+ 	uint16_t ktime_on;
+ 	uint64_t ktime_data_samples;
+ 	uint64_t ktime_status_samples;
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 819335b16c2e..1b8be1006cbe 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -1603,42 +1603,50 @@ out:
+ }
+ 
+ /**
+- * lpfc_debugfs_cpucheck_data - Dump target node list to a buffer
++ * lpfc_debugfs_hdwqstat_data - Dump I/O stats to a buffer
+  * @vport: The vport to gather target node info from.
+  * @buf: The buffer to dump log into.
+  * @size: The maximum amount of data to process.
+  *
+  * Description:
+- * This routine dumps the NVME statistics associated with @vport
++ * This routine dumps the NVME + SCSI statistics associated with @vport
+  *
+  * Return Value:
+  * This routine returns the amount of bytes that were dumped into @buf and will
+  * not exceed @size.
+  **/
+ static int
+-lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
++lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
+ {
+ 	struct lpfc_hba   *phba = vport->phba;
+ 	struct lpfc_sli4_hdw_queue *qp;
+-	int i, j, max_cnt;
+-	int len = 0;
++	struct lpfc_hdwq_stat *c_stat;
++	int i, j, len;
+ 	uint32_t tot_xmt;
+ 	uint32_t tot_rcv;
+ 	uint32_t tot_cmpl;
++	char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
+ 
+-	len += scnprintf(buf + len, PAGE_SIZE - len,
+-			"CPUcheck %s ",
+-			(phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
+-				"Enabled" : "Disabled"));
+-	if (phba->nvmet_support) {
+-		len += scnprintf(buf + len, PAGE_SIZE - len,
+-				"%s\n",
+-				(phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
+-					"Rcv Enabled\n" : "Rcv Disabled\n"));
+-	} else {
+-		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+-	}
+-	max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
++	scnprintf(tmp, sizeof(tmp), "HDWQ Stats:\n\n");
++	if (strlcat(buf, tmp, size) >= size)
++		goto buffer_done;
++
++	scnprintf(tmp, sizeof(tmp), "(NVME Accounting: %s) ",
++		  (phba->hdwqstat_on &
++		  (LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO) ?
++		  "Enabled" : "Disabled"));
++	if (strlcat(buf, tmp, size) >= size)
++		goto buffer_done;
++
++	scnprintf(tmp, sizeof(tmp), "(SCSI Accounting: %s) ",
++		  (phba->hdwqstat_on & LPFC_CHECK_SCSI_IO ?
++		  "Enabled" : "Disabled"));
++	if (strlcat(buf, tmp, size) >= size)
++		goto buffer_done;
++
++	scnprintf(tmp, sizeof(tmp), "\n\n");
++	if (strlcat(buf, tmp, size) >= size)
++		goto buffer_done;
+ 
+ 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ 		qp = &phba->sli4_hba.hdwq[i];
+@@ -1646,46 +1654,76 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
+ 		tot_rcv = 0;
+ 		tot_xmt = 0;
+ 		tot_cmpl = 0;
+-		for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
+-			tot_xmt += qp->cpucheck_xmt_io[j];
+-			tot_cmpl += qp->cpucheck_cmpl_io[j];
+-			if (phba->nvmet_support)
+-				tot_rcv += qp->cpucheck_rcv_io[j];
+-		}
+ 
+-		/* Only display Hardware Qs with something */
+-		if (!tot_xmt && !tot_cmpl && !tot_rcv)
+-			continue;
++		for_each_present_cpu(j) {
++			c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, j);
++
++			/* Only display for this HDWQ */
++			if (i != c_stat->hdwq_no)
++				continue;
+ 
+-		len += scnprintf(buf + len, PAGE_SIZE - len,
+-				"HDWQ %03d: ", i);
+-		for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
+ 			/* Only display non-zero counters */
+-			if (!qp->cpucheck_xmt_io[j] &&
+-			    !qp->cpucheck_cmpl_io[j] &&
+-			    !qp->cpucheck_rcv_io[j])
++			if (!c_stat->xmt_io && !c_stat->cmpl_io &&
++			    !c_stat->rcv_io)
+ 				continue;
++
++			if (!tot_xmt && !tot_cmpl && !tot_rcv) {
++				/* Print HDWQ string only the first time */
++				scnprintf(tmp, sizeof(tmp), "[HDWQ %d]:\t", i);
++				if (strlcat(buf, tmp, size) >= size)
++					goto buffer_done;
++			}
++
++			tot_xmt += c_stat->xmt_io;
++			tot_cmpl += c_stat->cmpl_io;
++			if (phba->nvmet_support)
++				tot_rcv += c_stat->rcv_io;
++
++			scnprintf(tmp, sizeof(tmp), "| [CPU %d]: ", j);
++			if (strlcat(buf, tmp, size) >= size)
++				goto buffer_done;
++
+ 			if (phba->nvmet_support) {
+-				len += scnprintf(buf + len, PAGE_SIZE - len,
+-						"CPU %03d: %x/%x/%x ", j,
+-						qp->cpucheck_rcv_io[j],
+-						qp->cpucheck_xmt_io[j],
+-						qp->cpucheck_cmpl_io[j]);
++				scnprintf(tmp, sizeof(tmp),
++					  "XMT 0x%x CMPL 0x%x RCV 0x%x |",
++					  c_stat->xmt_io, c_stat->cmpl_io,
++					  c_stat->rcv_io);
++				if (strlcat(buf, tmp, size) >= size)
++					goto buffer_done;
+ 			} else {
+-				len += scnprintf(buf + len, PAGE_SIZE - len,
+-						"CPU %03d: %x/%x ", j,
+-						qp->cpucheck_xmt_io[j],
+-						qp->cpucheck_cmpl_io[j]);
++				scnprintf(tmp, sizeof(tmp),
++					  "XMT 0x%x CMPL 0x%x |",
++					  c_stat->xmt_io, c_stat->cmpl_io);
++				if (strlcat(buf, tmp, size) >= size)
++					goto buffer_done;
+ 			}
+ 		}
+-		len += scnprintf(buf + len, PAGE_SIZE - len,
+-				"Total: %x\n", tot_xmt);
+-		if (len >= max_cnt) {
+-			len += scnprintf(buf + len, PAGE_SIZE - len,
+-					"Truncated ...\n");
+-			return len;
++
++		/* Check if nothing to display */
++		if (!tot_xmt && !tot_cmpl && !tot_rcv)
++			continue;
++
++		scnprintf(tmp, sizeof(tmp), "\t->\t[HDWQ Total: ");
++		if (strlcat(buf, tmp, size) >= size)
++			goto buffer_done;
++
++		if (phba->nvmet_support) {
++			scnprintf(tmp, sizeof(tmp),
++				  "XMT 0x%x CMPL 0x%x RCV 0x%x]\n\n",
++				  tot_xmt, tot_cmpl, tot_rcv);
++			if (strlcat(buf, tmp, size) >= size)
++				goto buffer_done;
++		} else {
++			scnprintf(tmp, sizeof(tmp),
++				  "XMT 0x%x CMPL 0x%x]\n\n",
++				  tot_xmt, tot_cmpl);
++			if (strlcat(buf, tmp, size) >= size)
++				goto buffer_done;
+ 		}
+ 	}
++
++buffer_done:
++	len = strnlen(buf, size);
+ 	return len;
+ }
+ 
+@@ -2921,7 +2959,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
+ }
+ 
+ static int
+-lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
++lpfc_debugfs_hdwqstat_open(struct inode *inode, struct file *file)
+ {
+ 	struct lpfc_vport *vport = inode->i_private;
+ 	struct lpfc_debug *debug;
+@@ -2932,14 +2970,14 @@ lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
+ 		goto out;
+ 
+ 	 /* Round to page boundary */
+-	debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL);
++	debug->buffer = kcalloc(1, LPFC_SCSISTAT_SIZE, GFP_KERNEL);
+ 	if (!debug->buffer) {
+ 		kfree(debug);
+ 		goto out;
+ 	}
+ 
+-	debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
+-		LPFC_CPUCHECK_SIZE);
++	debug->len = lpfc_debugfs_hdwqstat_data(vport, debug->buffer,
++						LPFC_SCSISTAT_SIZE);
+ 
+ 	debug->i_private = inode->i_private;
+ 	file->private_data = debug;
+@@ -2950,16 +2988,16 @@ out:
+ }
+ 
+ static ssize_t
+-lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
++lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
+ 			    size_t nbytes, loff_t *ppos)
+ {
+ 	struct lpfc_debug *debug = file->private_data;
+ 	struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+ 	struct lpfc_hba   *phba = vport->phba;
+-	struct lpfc_sli4_hdw_queue *qp;
++	struct lpfc_hdwq_stat *c_stat;
+ 	char mybuf[64];
+ 	char *pbuf;
+-	int i, j;
++	int i;
+ 
+ 	if (nbytes > 64)
+ 		nbytes = 64;
+@@ -2972,41 +3010,39 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
+ 
+ 	if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
+ 		if (phba->nvmet_support)
+-			phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
++			phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
+ 		else
+-			phba->cpucheck_on |= (LPFC_CHECK_NVME_IO |
++			phba->hdwqstat_on |= (LPFC_CHECK_NVME_IO |
+ 				LPFC_CHECK_SCSI_IO);
+ 		return strlen(pbuf);
+ 	} else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) {
+ 		if (phba->nvmet_support)
+-			phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
++			phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
+ 		else
+-			phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
++			phba->hdwqstat_on |= LPFC_CHECK_NVME_IO;
+ 		return strlen(pbuf);
+ 	} else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) {
+-		phba->cpucheck_on |= LPFC_CHECK_SCSI_IO;
++		if (!phba->nvmet_support)
++			phba->hdwqstat_on |= LPFC_CHECK_SCSI_IO;
+ 		return strlen(pbuf);
+-	} else if ((strncmp(pbuf, "rcv",
+-		   sizeof("rcv") - 1) == 0)) {
+-		if (phba->nvmet_support)
+-			phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV;
+-		else
+-			return -EINVAL;
++	} else if ((strncmp(pbuf, "nvme_off", sizeof("nvme_off") - 1) == 0)) {
++		phba->hdwqstat_on &= ~(LPFC_CHECK_NVME_IO |
++				       LPFC_CHECK_NVMET_IO);
++		return strlen(pbuf);
++	} else if ((strncmp(pbuf, "scsi_off", sizeof("scsi_off") - 1) == 0)) {
++		phba->hdwqstat_on &= ~LPFC_CHECK_SCSI_IO;
+ 		return strlen(pbuf);
+ 	} else if ((strncmp(pbuf, "off",
+ 		   sizeof("off") - 1) == 0)) {
+-		phba->cpucheck_on = LPFC_CHECK_OFF;
++		phba->hdwqstat_on = LPFC_CHECK_OFF;
+ 		return strlen(pbuf);
+ 	} else if ((strncmp(pbuf, "zero",
+ 		   sizeof("zero") - 1) == 0)) {
+-		for (i = 0; i < phba->cfg_hdw_queue; i++) {
+-			qp = &phba->sli4_hba.hdwq[i];
+-
+-			for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
+-				qp->cpucheck_rcv_io[j] = 0;
+-				qp->cpucheck_xmt_io[j] = 0;
+-				qp->cpucheck_cmpl_io[j] = 0;
+-			}
++		for_each_present_cpu(i) {
++			c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, i);
++			c_stat->xmt_io = 0;
++			c_stat->cmpl_io = 0;
++			c_stat->rcv_io = 0;
+ 		}
+ 		return strlen(pbuf);
+ 	}
+@@ -5451,13 +5487,13 @@ static const struct file_operations lpfc_debugfs_op_nvmeio_trc = {
+ 	.release =      lpfc_debugfs_release,
+ };
+ 
+-#undef lpfc_debugfs_op_cpucheck
+-static const struct file_operations lpfc_debugfs_op_cpucheck = {
++#undef lpfc_debugfs_op_hdwqstat
++static const struct file_operations lpfc_debugfs_op_hdwqstat = {
+ 	.owner =        THIS_MODULE,
+-	.open =         lpfc_debugfs_cpucheck_open,
++	.open =         lpfc_debugfs_hdwqstat_open,
+ 	.llseek =       lpfc_debugfs_lseek,
+ 	.read =         lpfc_debugfs_read,
+-	.write =	lpfc_debugfs_cpucheck_write,
++	.write =	lpfc_debugfs_hdwqstat_write,
+ 	.release =      lpfc_debugfs_release,
+ };
+ 
+@@ -6081,11 +6117,11 @@ nvmeio_off:
+ 				    vport->vport_debugfs_root,
+ 				    vport, &lpfc_debugfs_op_nvmektime);
+ 
+-	snprintf(name, sizeof(name), "cpucheck");
+-	vport->debug_cpucheck =
++	snprintf(name, sizeof(name), "hdwqstat");
++	vport->debug_hdwqstat =
+ 		debugfs_create_file(name, 0644,
+ 				    vport->vport_debugfs_root,
+-				    vport, &lpfc_debugfs_op_cpucheck);
++				    vport, &lpfc_debugfs_op_hdwqstat);
+ 
+ 	/*
+ 	 * The following section is for additional directories/files for the
+@@ -6219,8 +6255,8 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
+ 	debugfs_remove(vport->debug_nvmektime); /* nvmektime */
+ 	vport->debug_nvmektime = NULL;
+ 
+-	debugfs_remove(vport->debug_cpucheck); /* cpucheck */
+-	vport->debug_cpucheck = NULL;
++	debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */
++	vport->debug_hdwqstat = NULL;
+ 
+ 	if (vport->vport_debugfs_root) {
+ 		debugfs_remove(vport->vport_debugfs_root); /* vportX */
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
+index 20f2537af511..6643b9bfd4f3 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.h
++++ b/drivers/scsi/lpfc/lpfc_debugfs.h
+@@ -47,7 +47,6 @@
+ /* nvmestat output buffer size */
+ #define LPFC_NVMESTAT_SIZE 8192
+ #define LPFC_NVMEKTIME_SIZE 8192
+-#define LPFC_CPUCHECK_SIZE 8192
+ #define LPFC_NVMEIO_TRC_SIZE 8192
+ 
+ /* scsistat output buffer size */
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 5a605773dd0a..48fde2b1ebba 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -6935,6 +6935,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+ 		rc = -ENOMEM;
+ 		goto out_free_hba_cpu_map;
+ 	}
++
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++	phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
++	if (!phba->sli4_hba.c_stat) {
++		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++				"3332 Failed allocating per cpu hdwq stats\n");
++		rc = -ENOMEM;
++		goto out_free_hba_eq_info;
++	}
++#endif
++
+ 	/*
+ 	 * Enable sr-iov virtual functions if supported and configured
+ 	 * through the module parameter.
+@@ -6954,6 +6965,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+ 
+ 	return 0;
+ 
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++out_free_hba_eq_info:
++	free_percpu(phba->sli4_hba.eq_info);
++#endif
+ out_free_hba_cpu_map:
+ 	kfree(phba->sli4_hba.cpu_map);
+ out_free_hba_eq_hdl:
+@@ -6992,6 +7007,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
+ 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+ 
+ 	free_percpu(phba->sli4_hba.eq_info);
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++	free_percpu(phba->sli4_hba.c_stat);
++#endif
+ 
+ 	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
+ 	kfree(phba->sli4_hba.cpu_map);
+@@ -10831,6 +10849,9 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
+ #ifdef CONFIG_X86
+ 	struct cpuinfo_x86 *cpuinfo;
+ #endif
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++	struct lpfc_hdwq_stat *c_stat;
++#endif
+ 
+ 	max_phys_id = 0;
+ 	min_phys_id = LPFC_VECTOR_MAP_EMPTY;
+@@ -11082,10 +11103,17 @@ found_any:
+ 	idx = 0;
+ 	for_each_possible_cpu(cpu) {
+ 		cpup = &phba->sli4_hba.cpu_map[cpu];
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++		c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
++		c_stat->hdwq_no = cpup->hdwq;
++#endif
+ 		if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
+ 			continue;
+ 
+ 		cpup->hdwq = idx++ % phba->cfg_hdw_queue;
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++		c_stat->hdwq_no = cpup->hdwq;
++#endif
+ 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ 				"3340 Set Affinity: not present "
+ 				"CPU %d hdwq %d\n",
+@@ -11175,11 +11203,9 @@ static void lpfc_cpuhp_add(struct lpfc_hba *phba)
+ 
+ 	rcu_read_lock();
+ 
+-	if (!list_empty(&phba->poll_list)) {
+-		timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
++	if (!list_empty(&phba->poll_list))
+ 		mod_timer(&phba->cpuhp_poll_timer,
+ 			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+-	}
+ 
+ 	rcu_read_unlock();
+ 
+@@ -13145,6 +13171,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
+ 	lpfc_sli4_ras_setup(phba);
+ 
+ 	INIT_LIST_HEAD(&phba->poll_list);
++	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ 	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
+ 
+ 	return 0;
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
+index db4a04a207ec..8403d7ceafe4 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.c
++++ b/drivers/scsi/lpfc/lpfc_nvme.c
+@@ -382,13 +382,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
+ 	if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
+ 		ndlp->nrport = NULL;
+ 		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
+-	}
+-	spin_unlock_irq(&vport->phba->hbalock);
++		spin_unlock_irq(&vport->phba->hbalock);
+ 
+-	/* Remove original register reference. The host transport
+-	 * won't reference this rport/remoteport any further.
+-	 */
+-	lpfc_nlp_put(ndlp);
++		/* Remove original register reference. The host transport
++		 * won't reference this rport/remoteport any further.
++		 */
++		lpfc_nlp_put(ndlp);
++	} else {
++		spin_unlock_irq(&vport->phba->hbalock);
++	}
+ 
+  rport_err:
+ 	return;
+@@ -1010,6 +1012,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ 	uint32_t code, status, idx;
+ 	uint16_t cid, sqhd, data;
+ 	uint32_t *ptr;
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++	int cpu;
++#endif
+ 
+ 	/* Sanity check on return of outstanding command */
+ 	if (!lpfc_ncmd) {
+@@ -1182,19 +1187,15 @@ out_err:
+ 		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
+ 		lpfc_nvme_ktime(phba, lpfc_ncmd);
+ 	}
+-	if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
+-		uint32_t cpu;
+-		idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
++	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
+ 		cpu = raw_smp_processor_id();
+-		if (cpu < LPFC_CHECK_CPU_CNT) {
+-			if (lpfc_ncmd->cpu != cpu)
+-				lpfc_printf_vlog(vport,
+-						 KERN_INFO, LOG_NVME_IOERR,
+-						 "6701 CPU Check cmpl: "
+-						 "cpu %d expect %d\n",
+-						 cpu, lpfc_ncmd->cpu);
+-			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
+-		}
++		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
++		if (lpfc_ncmd->cpu != cpu)
++			lpfc_printf_vlog(vport,
++					 KERN_INFO, LOG_NVME_IOERR,
++					 "6701 CPU Check cmpl: "
++					 "cpu %d expect %d\n",
++					 cpu, lpfc_ncmd->cpu);
+ 	}
+ #endif
+ 
+@@ -1743,19 +1744,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
+ 	if (lpfc_ncmd->ts_cmd_start)
+ 		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
+ 
+-	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
++	if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
+ 		cpu = raw_smp_processor_id();
+-		if (cpu < LPFC_CHECK_CPU_CNT) {
+-			lpfc_ncmd->cpu = cpu;
+-			if (idx != cpu)
+-				lpfc_printf_vlog(vport,
+-						 KERN_INFO, LOG_NVME_IOERR,
+-						"6702 CPU Check cmd: "
+-						"cpu %d wq %d\n",
+-						lpfc_ncmd->cpu,
+-						lpfc_queue_info->index);
+-			phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
+-		}
++		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
++		lpfc_ncmd->cpu = cpu;
++		if (idx != cpu)
++			lpfc_printf_vlog(vport,
++					 KERN_INFO, LOG_NVME_IOERR,
++					"6702 CPU Check cmd: "
++					"cpu %d wq %d\n",
++					lpfc_ncmd->cpu,
++					lpfc_queue_info->index);
+ 	}
+ #endif
+ 	return 0;
+diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
+index 9dc9afe1c255..f3760a4827d8 100644
+--- a/drivers/scsi/lpfc/lpfc_nvmet.c
++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
+@@ -707,7 +707,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ 	struct lpfc_nvmet_rcv_ctx *ctxp;
+ 	uint32_t status, result, op, start_clean, logerr;
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+-	uint32_t id;
++	int id;
+ #endif
+ 
+ 	ctxp = cmdwqe->context2;
+@@ -814,16 +814,14 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ 		rsp->done(rsp);
+ 	}
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+-	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
++	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
+ 		id = raw_smp_processor_id();
+-		if (id < LPFC_CHECK_CPU_CNT) {
+-			if (ctxp->cpu != id)
+-				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+-						"6704 CPU Check cmdcmpl: "
+-						"cpu %d expect %d\n",
+-						id, ctxp->cpu);
+-			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
+-		}
++		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
++		if (ctxp->cpu != id)
++			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
++					"6704 CPU Check cmdcmpl: "
++					"cpu %d expect %d\n",
++					id, ctxp->cpu);
+ 	}
+ #endif
+ }
+@@ -931,6 +929,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
+ 	struct lpfc_sli_ring *pring;
+ 	unsigned long iflags;
+ 	int rc;
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++	int id;
++#endif
+ 
+ 	if (phba->pport->load_flag & FC_UNLOADING) {
+ 		rc = -ENODEV;
+@@ -954,16 +955,14 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
+ 	if (!ctxp->hdwq)
+ 		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
+ 
+-	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+-		int id = raw_smp_processor_id();
+-		if (id < LPFC_CHECK_CPU_CNT) {
+-			if (rsp->hwqid != id)
+-				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+-						"6705 CPU Check OP: "
+-						"cpu %d expect %d\n",
+-						id, rsp->hwqid);
+-			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
+-		}
++	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
++		id = raw_smp_processor_id();
++		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
++		if (rsp->hwqid != id)
++			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
++					"6705 CPU Check OP: "
++					"cpu %d expect %d\n",
++					id, rsp->hwqid);
+ 		ctxp->cpu = id; /* Setup cpu for cmpl check */
+ 	}
+ #endif
+@@ -2270,15 +2269,13 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
+ 	size = nvmebuf->bytes_recv;
+ 
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+-	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
+-		if (current_cpu < LPFC_CHECK_CPU_CNT) {
+-			if (idx != current_cpu)
+-				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+-						"6703 CPU Check rcv: "
+-						"cpu %d expect %d\n",
+-						current_cpu, idx);
+-			phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
+-		}
++	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
++		this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
++		if (idx != current_cpu)
++			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
++					"6703 CPU Check rcv: "
++					"cpu %d expect %d\n",
++					current_cpu, idx);
+ 	}
+ #endif
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 96ac4a154c58..ed8bcbd043c4 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -3805,9 +3805,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+ 	struct Scsi_Host *shost;
+ 	int idx;
+ 	uint32_t logit = LOG_FCP;
+-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+-	int cpu;
+-#endif
+ 
+ 	/* Guard against abort handler being called at same time */
+ 	spin_lock(&lpfc_cmd->buf_lock);
+@@ -3826,11 +3823,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+ 		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
+ 
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+-	if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
+-		cpu = raw_smp_processor_id();
+-		if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
+-			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
+-	}
++	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
++		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+ #endif
+ 	shost = cmd->device->host;
+ 
+@@ -4503,9 +4497,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+ 	struct lpfc_io_buf *lpfc_cmd;
+ 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+ 	int err, idx;
+-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+-	int cpu;
+-#endif
+ 
+ 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ 
+@@ -4626,14 +4617,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+ 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+ 
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+-	if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
+-		cpu = raw_smp_processor_id();
+-		if (cpu < LPFC_CHECK_CPU_CNT) {
+-			struct lpfc_sli4_hdw_queue *hdwq =
+-					&phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
+-			hdwq->cpucheck_xmt_io[cpu]++;
+-		}
+-	}
++	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
++		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+ #endif
+ 	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
+ 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 64002b0cb02d..396e24764a1b 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -2511,6 +2511,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 	    !pmb->u.mb.mbxStatus) {
+ 		rpi = pmb->u.mb.un.varWords[0];
+ 		vpi = pmb->u.mb.un.varRegLogin.vpi;
++		if (phba->sli_rev == LPFC_SLI_REV4)
++			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
+ 		lpfc_unreg_login(phba, vpi, rpi, pmb);
+ 		pmb->vport = vport;
+ 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+@@ -4044,6 +4046,11 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
+ 	struct lpfc_iocbq *piocb, *next_iocb;
+ 
+ 	spin_lock_irq(&phba->hbalock);
++	if (phba->hba_flag & HBA_IOQ_FLUSH ||
++	    !phba->sli4_hba.hdwq) {
++		spin_unlock_irq(&phba->hbalock);
++		return;
++	}
+ 	/* Indicate the I/O queues are flushed */
+ 	phba->hba_flag |= HBA_IOQ_FLUSH;
+ 	spin_unlock_irq(&phba->hbalock);
+@@ -14450,12 +14457,10 @@ static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
+ {
+ 	struct lpfc_hba *phba = eq->phba;
+ 
+-	if (list_empty(&phba->poll_list)) {
+-		timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+-		/* kickstart slowpath processing for this eq */
++	/* kickstart slowpath processing if needed */
++	if (list_empty(&phba->poll_list))
+ 		mod_timer(&phba->cpuhp_poll_timer,
+ 			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+-	}
+ 
+ 	list_add_rcu(&eq->_poll_list, &phba->poll_list);
+ 	synchronize_rcu();
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index d963ca871383..8da7429e385a 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -697,13 +697,6 @@ struct lpfc_sli4_hdw_queue {
+ 	struct lpfc_lock_stat lock_conflict;
+ #endif
+ 
+-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+-#define LPFC_CHECK_CPU_CNT    128
+-	uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
+-	uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
+-	uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
+-#endif
+-
+ 	/* Per HDWQ pool resources */
+ 	struct list_head sgl_list;
+ 	struct list_head cmd_rsp_buf_list;
+@@ -740,6 +733,15 @@ struct lpfc_sli4_hdw_queue {
+ #define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
+ #endif
+ 
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++struct lpfc_hdwq_stat {
++	u32 hdwq_no;
++	u32 rcv_io;
++	u32 xmt_io;
++	u32 cmpl_io;
++};
++#endif
++
+ struct lpfc_sli4_hba {
+ 	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
+ 					   * config space registers
+@@ -921,6 +923,9 @@ struct lpfc_sli4_hba {
+ 	struct cpumask numa_mask;
+ 	uint16_t curr_disp_cpu;
+ 	struct lpfc_eq_intr_info __percpu *eq_info;
++#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
++	struct lpfc_hdwq_stat __percpu *c_stat;
++#endif
+ 	uint32_t conf_trunk;
+ #define lpfc_conf_trunk_port0_WORD	conf_trunk
+ #define lpfc_conf_trunk_port0_SHIFT	0
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index dfc726fa34e3..443ace019852 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -2012,7 +2012,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ 	if (session->target_id == ISCSI_MAX_TARGET) {
+ 		spin_unlock_irqrestore(&session->lock, flags);
+ 		mutex_unlock(&ihost->mutex);
+-		return;
++		goto unbind_session_exit;
+ 	}
+ 
+ 	target_id = session->target_id;
+@@ -2024,6 +2024,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ 		ida_simple_remove(&iscsi_sess_ida, target_id);
+ 
+ 	scsi_remove_target(&session->dev);
++
++unbind_session_exit:
+ 	iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+ 	ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
+ }
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 08d1bbbebf2d..e84b4fb493d6 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -2725,8 +2725,10 @@ static int comedi_open(struct inode *inode, struct file *file)
+ 	}
+ 
+ 	cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
+-	if (!cfp)
++	if (!cfp) {
++		comedi_dev_put(dev);
+ 		return -ENOMEM;
++	}
+ 
+ 	cfp->dev = dev;
+ 
+diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
+index 83026ba63d1c..78a7c1b3448a 100644
+--- a/drivers/staging/comedi/drivers/dt2815.c
++++ b/drivers/staging/comedi/drivers/dt2815.c
+@@ -92,6 +92,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
+ 	int ret;
+ 
+ 	for (i = 0; i < insn->n; i++) {
++		/* FIXME: lo bit 0 chooses voltage output or current output */
+ 		lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
+ 		hi = (data[i] & 0xff0) >> 4;
+ 
+@@ -105,6 +106,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
+ 		if (ret)
+ 			return ret;
+ 
++		outb(hi, dev->iobase + DT2815_DATA);
++
+ 		devpriv->ao_readback[chan] = data[i];
+ 	}
+ 	return i;
+diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c
+index a2d67c28f530..5f0e089573a2 100644
+--- a/drivers/staging/gasket/gasket_sysfs.c
++++ b/drivers/staging/gasket/gasket_sysfs.c
+@@ -228,8 +228,7 @@ int gasket_sysfs_create_entries(struct device *device,
+ 	}
+ 
+ 	mutex_lock(&mapping->mutex);
+-	for (i = 0; strcmp(attrs[i].attr.attr.name, GASKET_ARRAY_END_MARKER);
+-		i++) {
++	for (i = 0; attrs[i].attr.attr.name != NULL; i++) {
+ 		if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) {
+ 			dev_err(device,
+ 				"Maximum number of sysfs nodes reached for device\n");
+diff --git a/drivers/staging/gasket/gasket_sysfs.h b/drivers/staging/gasket/gasket_sysfs.h
+index 1d0eed66a7f4..ab5aa351d555 100644
+--- a/drivers/staging/gasket/gasket_sysfs.h
++++ b/drivers/staging/gasket/gasket_sysfs.h
+@@ -30,10 +30,6 @@
+  */
+ #define GASKET_SYSFS_MAX_NODES 196
+ 
+-/* End markers for sysfs struct arrays. */
+-#define GASKET_ARRAY_END_TOKEN GASKET_RESERVED_ARRAY_END
+-#define GASKET_ARRAY_END_MARKER __stringify(GASKET_ARRAY_END_TOKEN)
+-
+ /*
+  * Terminator struct for a gasket_sysfs_attr array. Must be at the end of
+  * all gasket_sysfs_attribute arrays.
+diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
+index af215860be4c..ac563e23868e 100644
+--- a/drivers/staging/vt6656/int.c
++++ b/drivers/staging/vt6656/int.c
+@@ -145,7 +145,8 @@ void vnt_int_process_data(struct vnt_private *priv)
+ 				priv->wake_up_count =
+ 					priv->hw->conf.listen_interval;
+ 
+-			--priv->wake_up_count;
++			if (priv->wake_up_count)
++				--priv->wake_up_count;
+ 
+ 			/* Turn on wake up to listen next beacon */
+ 			if (priv->wake_up_count == 1)
+diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
+index dcd933a6b66e..40c58ac4e209 100644
+--- a/drivers/staging/vt6656/key.c
++++ b/drivers/staging/vt6656/key.c
+@@ -83,9 +83,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
+ 	case  VNT_KEY_PAIRWISE:
+ 		key_mode |= mode;
+ 		key_inx = 4;
+-		/* Don't save entry for pairwise key for station mode */
+-		if (priv->op_mode == NL80211_IFTYPE_STATION)
+-			clear_bit(entry, &priv->key_entry_inuse);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -109,7 +106,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
+ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ 		 struct ieee80211_vif *vif, struct ieee80211_key_conf *key)
+ {
+-	struct ieee80211_bss_conf *conf = &vif->bss_conf;
+ 	struct vnt_private *priv = hw->priv;
+ 	u8 *mac_addr = NULL;
+ 	u8 key_dec_mode = 0;
+@@ -151,16 +147,12 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ 	}
+ 
+-	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
++	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ 		vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
+ 				key_dec_mode, true);
+-	} else {
+-		vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
++	else
++		vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS,
+ 				key_dec_mode, true);
+ 
+-		vnt_set_keymode(hw, (u8 *)conf->bssid, key,
+-				VNT_KEY_GROUP_ADDRESS, key_dec_mode, true);
+-	}
+-
+ 	return 0;
+ }
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index 5e48b3ddb94c..1da9905a23b8 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -632,8 +632,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 
+ 	priv->op_mode = vif->type;
+ 
+-	vnt_set_bss_mode(priv);
+-
+ 	/* LED blink on TX */
+ 	vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER);
+ 
+@@ -720,7 +718,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
+ 		priv->basic_rates = conf->basic_rates;
+ 
+ 		vnt_update_top_rates(priv);
+-		vnt_set_bss_mode(priv);
+ 
+ 		dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates);
+ 	}
+@@ -749,11 +746,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
+ 			priv->short_slot_time = false;
+ 
+ 		vnt_set_short_slot_time(priv);
+-		vnt_update_ifs(priv);
+ 		vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
+ 		vnt_update_pre_ed_threshold(priv, false);
+ 	}
+ 
++	if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE |
++		       BSS_CHANGED_ERP_SLOT))
++		vnt_set_bss_mode(priv);
++
+ 	if (changed & BSS_CHANGED_TXPOWER)
+ 		vnt_rf_setpower(priv, priv->current_rate,
+ 				conf->chandef.chan->hw_value);
+@@ -777,12 +777,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
+ 			vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL,
+ 					    TFTCTL_TSFCNTREN);
+ 
+-			vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
+-				       conf->sync_tsf, priv->current_tsf);
+-
+ 			vnt_mac_set_beacon_interval(priv, conf->beacon_int);
+ 
+ 			vnt_reset_next_tbtt(priv, conf->beacon_int);
++
++			vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
++				       conf->sync_tsf, priv->current_tsf);
++
++			vnt_update_next_tbtt(priv,
++					     conf->sync_tsf, conf->beacon_int);
+ 		} else {
+ 			vnt_clear_current_tsf(priv);
+ 
+@@ -817,15 +820,11 @@ static void vnt_configure(struct ieee80211_hw *hw,
+ {
+ 	struct vnt_private *priv = hw->priv;
+ 	u8 rx_mode = 0;
+-	int rc;
+ 
+ 	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
+ 
+-	rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
+-			    MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
+-
+-	if (!rc)
+-		rx_mode = RCR_MULTICAST | RCR_BROADCAST;
++	vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
++		       MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
+ 
+ 	dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode);
+ 
+@@ -866,8 +865,12 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 			return -EOPNOTSUPP;
+ 		break;
+ 	case DISABLE_KEY:
+-		if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
++		if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) {
+ 			clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
++
++			vnt_mac_disable_keyentry(priv, key->hw_key_idx);
++		}
++
+ 	default:
+ 		break;
+ 	}
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index 27284a2dcd2b..436cc51c92c3 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -302,10 +302,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
+ 	vtermnos[index] = vtermno;
+ 	cons_ops[index] = ops;
+ 
+-	/* reserve all indices up to and including this index */
+-	if (last_hvc < index)
+-		last_hvc = index;
+-
+ 	/* check if we need to re-register the kernel console */
+ 	hvc_check_console(index);
+ 
+@@ -960,13 +956,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
+ 		    cons_ops[i] == hp->ops)
+ 			break;
+ 
+-	/* no matching slot, just use a counter */
+-	if (i >= MAX_NR_HVC_CONSOLES)
+-		i = ++last_hvc;
++	if (i >= MAX_NR_HVC_CONSOLES) {
++
++		/* find 'empty' slot for console */
++		for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) {
++		}
++
++		/* no matching slot, just use a counter */
++		if (i == MAX_NR_HVC_CONSOLES)
++			i = ++last_hvc + MAX_NR_HVC_CONSOLES;
++	}
+ 
+ 	hp->index = i;
+-	cons_ops[i] = ops;
+-	vtermnos[i] = vtermno;
++	if (i < MAX_NR_HVC_CONSOLES) {
++		cons_ops[i] = ops;
++		vtermnos[i] = vtermno;
++	}
+ 
+ 	list_add_tail(&(hp->next), &hvc_structs);
+ 	mutex_unlock(&hvc_structs_mutex);
+diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
+index fbaa4ec85560..e2138e7d5dc6 100644
+--- a/drivers/tty/rocket.c
++++ b/drivers/tty/rocket.c
+@@ -632,18 +632,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
+ 	tty_port_init(&info->port);
+ 	info->port.ops = &rocket_port_ops;
+ 	info->flags &= ~ROCKET_MODE_MASK;
+-	switch (pc104[board][line]) {
+-	case 422:
+-		info->flags |= ROCKET_MODE_RS422;
+-		break;
+-	case 485:
+-		info->flags |= ROCKET_MODE_RS485;
+-		break;
+-	case 232:
+-	default:
++	if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1))
++		switch (pc104[board][line]) {
++		case 422:
++			info->flags |= ROCKET_MODE_RS422;
++			break;
++		case 485:
++			info->flags |= ROCKET_MODE_RS485;
++			break;
++		case 232:
++		default:
++			info->flags |= ROCKET_MODE_RS232;
++			break;
++		}
++	else
+ 		info->flags |= ROCKET_MODE_RS232;
+-		break;
+-	}
+ 
+ 	info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
+ 	if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
+diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
+index 42c8cc93b603..c149f8c30007 100644
+--- a/drivers/tty/serial/owl-uart.c
++++ b/drivers/tty/serial/owl-uart.c
+@@ -680,6 +680,12 @@ static int owl_uart_probe(struct platform_device *pdev)
+ 		return PTR_ERR(owl_port->clk);
+ 	}
+ 
++	ret = clk_prepare_enable(owl_port->clk);
++	if (ret) {
++		dev_err(&pdev->dev, "could not enable clk\n");
++		return ret;
++	}
++
+ 	owl_port->port.dev = &pdev->dev;
+ 	owl_port->port.line = pdev->id;
+ 	owl_port->port.type = PORT_OWL;
+@@ -712,6 +718,7 @@ static int owl_uart_remove(struct platform_device *pdev)
+ 
+ 	uart_remove_one_port(&owl_uart_driver, &owl_port->port);
+ 	owl_uart_ports[pdev->id] = NULL;
++	clk_disable_unprepare(owl_port->clk);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index c073aa7001c4..e1179e74a2b8 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -870,9 +870,16 @@ static void sci_receive_chars(struct uart_port *port)
+ 				tty_insert_flip_char(tport, c, TTY_NORMAL);
+ 		} else {
+ 			for (i = 0; i < count; i++) {
+-				char c = serial_port_in(port, SCxRDR);
+-
+-				status = serial_port_in(port, SCxSR);
++				char c;
++
++				if (port->type == PORT_SCIF ||
++				    port->type == PORT_HSCIF) {
++					status = serial_port_in(port, SCxSR);
++					c = serial_port_in(port, SCxRDR);
++				} else {
++					c = serial_port_in(port, SCxRDR);
++					status = serial_port_in(port, SCxSR);
++				}
+ 				if (uart_handle_sysrq_char(port, c)) {
+ 					count--; i--;
+ 					continue;
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index 98db9dc168ff..7a9b360b0438 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -26,13 +26,15 @@
+ 
+ #define CDNS_UART_TTY_NAME	"ttyPS"
+ #define CDNS_UART_NAME		"xuartps"
++#define CDNS_UART_MAJOR		0	/* use dynamic node allocation */
++#define CDNS_UART_MINOR		0	/* works best with devtmpfs */
++#define CDNS_UART_NR_PORTS	16
+ #define CDNS_UART_FIFO_SIZE	64	/* FIFO size */
+ #define CDNS_UART_REGISTER_SPACE	0x1000
+ #define TX_TIMEOUT		500000
+ 
+ /* Rx Trigger level */
+ static int rx_trigger_level = 56;
+-static int uartps_major;
+ module_param(rx_trigger_level, uint, 0444);
+ MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes");
+ 
+@@ -188,7 +190,6 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
+  * @pclk:		APB clock
+  * @cdns_uart_driver:	Pointer to UART driver
+  * @baud:		Current baud rate
+- * @id:			Port ID
+  * @clk_rate_change_nb:	Notifier block for clock changes
+  * @quirks:		Flags for RXBS support.
+  */
+@@ -198,7 +199,6 @@ struct cdns_uart {
+ 	struct clk		*pclk;
+ 	struct uart_driver	*cdns_uart_driver;
+ 	unsigned int		baud;
+-	int			id;
+ 	struct notifier_block	clk_rate_change_nb;
+ 	u32			quirks;
+ 	bool cts_override;
+@@ -1145,6 +1145,8 @@ static const struct uart_ops cdns_uart_ops = {
+ #endif
+ };
+ 
++static struct uart_driver cdns_uart_uart_driver;
++
+ #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
+ /**
+  * cdns_uart_console_putchar - write the character to the FIFO buffer
+@@ -1284,6 +1286,16 @@ static int cdns_uart_console_setup(struct console *co, char *options)
+ 
+ 	return uart_set_options(port, co, baud, parity, bits, flow);
+ }
++
++static struct console cdns_uart_console = {
++	.name	= CDNS_UART_TTY_NAME,
++	.write	= cdns_uart_console_write,
++	.device	= uart_console_device,
++	.setup	= cdns_uart_console_setup,
++	.flags	= CON_PRINTBUFFER,
++	.index	= -1, /* Specified on the cmdline (e.g. console=ttyPS ) */
++	.data	= &cdns_uart_uart_driver,
++};
+ #endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -1415,89 +1427,8 @@ static const struct of_device_id cdns_uart_of_match[] = {
+ };
+ MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
+ 
+-/*
+- * Maximum number of instances without alias IDs but if there is alias
+- * which target "< MAX_UART_INSTANCES" range this ID can't be used.
+- */
+-#define MAX_UART_INSTANCES	32
+-
+-/* Stores static aliases list */
+-static DECLARE_BITMAP(alias_bitmap, MAX_UART_INSTANCES);
+-static int alias_bitmap_initialized;
+-
+-/* Stores actual bitmap of allocated IDs with alias IDs together */
+-static DECLARE_BITMAP(bitmap, MAX_UART_INSTANCES);
+-/* Protect bitmap operations to have unique IDs */
+-static DEFINE_MUTEX(bitmap_lock);
+-
+-static int cdns_get_id(struct platform_device *pdev)
+-{
+-	int id, ret;
+-
+-	mutex_lock(&bitmap_lock);
+-
+-	/* Alias list is stable that's why get alias bitmap only once */
+-	if (!alias_bitmap_initialized) {
+-		ret = of_alias_get_alias_list(cdns_uart_of_match, "serial",
+-					      alias_bitmap, MAX_UART_INSTANCES);
+-		if (ret && ret != -EOVERFLOW) {
+-			mutex_unlock(&bitmap_lock);
+-			return ret;
+-		}
+-
+-		alias_bitmap_initialized++;
+-	}
+-
+-	/* Make sure that alias ID is not taken by instance without alias */
+-	bitmap_or(bitmap, bitmap, alias_bitmap, MAX_UART_INSTANCES);
+-
+-	dev_dbg(&pdev->dev, "Alias bitmap: %*pb\n",
+-		MAX_UART_INSTANCES, bitmap);
+-
+-	/* Look for a serialN alias */
+-	id = of_alias_get_id(pdev->dev.of_node, "serial");
+-	if (id < 0) {
+-		dev_warn(&pdev->dev,
+-			 "No serial alias passed. Using the first free id\n");
+-
+-		/*
+-		 * Start with id 0 and check if there is no serial0 alias
+-		 * which points to device which is compatible with this driver.
+-		 * If alias exists then try next free position.
+-		 */
+-		id = 0;
+-
+-		for (;;) {
+-			dev_info(&pdev->dev, "Checking id %d\n", id);
+-			id = find_next_zero_bit(bitmap, MAX_UART_INSTANCES, id);
+-
+-			/* No free empty instance */
+-			if (id == MAX_UART_INSTANCES) {
+-				dev_err(&pdev->dev, "No free ID\n");
+-				mutex_unlock(&bitmap_lock);
+-				return -EINVAL;
+-			}
+-
+-			dev_dbg(&pdev->dev, "The empty id is %d\n", id);
+-			/* Check if ID is empty */
+-			if (!test_and_set_bit(id, bitmap)) {
+-				/* Break the loop if bit is taken */
+-				dev_dbg(&pdev->dev,
+-					"Selected ID %d allocation passed\n",
+-					id);
+-				break;
+-			}
+-			dev_dbg(&pdev->dev,
+-				"Selected ID %d allocation failed\n", id);
+-			/* if taking bit fails then try next one */
+-			id++;
+-		}
+-	}
+-
+-	mutex_unlock(&bitmap_lock);
+-
+-	return id;
+-}
++/* Temporary variable for storing number of instances */
++static int instances;
+ 
+ /**
+  * cdns_uart_probe - Platform driver probe
+@@ -1507,16 +1438,11 @@ static int cdns_get_id(struct platform_device *pdev)
+  */
+ static int cdns_uart_probe(struct platform_device *pdev)
+ {
+-	int rc, irq;
++	int rc, id, irq;
+ 	struct uart_port *port;
+ 	struct resource *res;
+ 	struct cdns_uart *cdns_uart_data;
+ 	const struct of_device_id *match;
+-	struct uart_driver *cdns_uart_uart_driver;
+-	char *driver_name;
+-#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
+-	struct console *cdns_uart_console;
+-#endif
+ 
+ 	cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
+ 			GFP_KERNEL);
+@@ -1526,64 +1452,35 @@ static int cdns_uart_probe(struct platform_device *pdev)
+ 	if (!port)
+ 		return -ENOMEM;
+ 
+-	cdns_uart_uart_driver = devm_kzalloc(&pdev->dev,
+-					     sizeof(*cdns_uart_uart_driver),
+-					     GFP_KERNEL);
+-	if (!cdns_uart_uart_driver)
+-		return -ENOMEM;
+-
+-	cdns_uart_data->id = cdns_get_id(pdev);
+-	if (cdns_uart_data->id < 0)
+-		return cdns_uart_data->id;
++	/* Look for a serialN alias */
++	id = of_alias_get_id(pdev->dev.of_node, "serial");
++	if (id < 0)
++		id = 0;
+ 
+-	/* There is a need to use unique driver name */
+-	driver_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%d",
+-				     CDNS_UART_NAME, cdns_uart_data->id);
+-	if (!driver_name) {
+-		rc = -ENOMEM;
+-		goto err_out_id;
++	if (id >= CDNS_UART_NR_PORTS) {
++		dev_err(&pdev->dev, "Cannot get uart_port structure\n");
++		return -ENODEV;
+ 	}
+ 
+-	cdns_uart_uart_driver->owner = THIS_MODULE;
+-	cdns_uart_uart_driver->driver_name = driver_name;
+-	cdns_uart_uart_driver->dev_name	= CDNS_UART_TTY_NAME;
+-	cdns_uart_uart_driver->major = uartps_major;
+-	cdns_uart_uart_driver->minor = cdns_uart_data->id;
+-	cdns_uart_uart_driver->nr = 1;
+-
++	if (!cdns_uart_uart_driver.state) {
++		cdns_uart_uart_driver.owner = THIS_MODULE;
++		cdns_uart_uart_driver.driver_name = CDNS_UART_NAME;
++		cdns_uart_uart_driver.dev_name = CDNS_UART_TTY_NAME;
++		cdns_uart_uart_driver.major = CDNS_UART_MAJOR;
++		cdns_uart_uart_driver.minor = CDNS_UART_MINOR;
++		cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
+ #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
+-	cdns_uart_console = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_console),
+-					 GFP_KERNEL);
+-	if (!cdns_uart_console) {
+-		rc = -ENOMEM;
+-		goto err_out_id;
+-	}
+-
+-	strncpy(cdns_uart_console->name, CDNS_UART_TTY_NAME,
+-		sizeof(cdns_uart_console->name));
+-	cdns_uart_console->index = cdns_uart_data->id;
+-	cdns_uart_console->write = cdns_uart_console_write;
+-	cdns_uart_console->device = uart_console_device;
+-	cdns_uart_console->setup = cdns_uart_console_setup;
+-	cdns_uart_console->flags = CON_PRINTBUFFER;
+-	cdns_uart_console->data = cdns_uart_uart_driver;
+-	cdns_uart_uart_driver->cons = cdns_uart_console;
++		cdns_uart_uart_driver.cons = &cdns_uart_console;
+ #endif
+ 
+-	rc = uart_register_driver(cdns_uart_uart_driver);
+-	if (rc < 0) {
+-		dev_err(&pdev->dev, "Failed to register driver\n");
+-		goto err_out_id;
++		rc = uart_register_driver(&cdns_uart_uart_driver);
++		if (rc < 0) {
++			dev_err(&pdev->dev, "Failed to register driver\n");
++			return rc;
++		}
+ 	}
+ 
+-	cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver;
+-
+-	/*
+-	 * Setting up proper name_base needs to be done after uart
+-	 * registration because tty_driver structure is not filled.
+-	 * name_base is 0 by default.
+-	 */
+-	cdns_uart_uart_driver->tty_driver->name_base = cdns_uart_data->id;
++	cdns_uart_data->cdns_uart_driver = &cdns_uart_uart_driver;
+ 
+ 	match = of_match_node(cdns_uart_of_match, pdev->dev.of_node);
+ 	if (match && match->data) {
+@@ -1661,6 +1558,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
+ 	port->ops	= &cdns_uart_ops;
+ 	port->fifosize	= CDNS_UART_FIFO_SIZE;
+ 	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE);
++	port->line	= id;
+ 
+ 	/*
+ 	 * Register the port.
+@@ -1692,7 +1590,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
+ 		console_port = port;
+ #endif
+ 
+-	rc = uart_add_one_port(cdns_uart_uart_driver, port);
++	rc = uart_add_one_port(&cdns_uart_uart_driver, port);
+ 	if (rc) {
+ 		dev_err(&pdev->dev,
+ 			"uart_add_one_port() failed; err=%i\n", rc);
+@@ -1702,13 +1600,15 @@ static int cdns_uart_probe(struct platform_device *pdev)
+ #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
+ 	/* This is not port which is used for console that's why clean it up */
+ 	if (console_port == port &&
+-	    !(cdns_uart_uart_driver->cons->flags & CON_ENABLED))
++	    !(cdns_uart_uart_driver.cons->flags & CON_ENABLED))
+ 		console_port = NULL;
+ #endif
+ 
+-	uartps_major = cdns_uart_uart_driver->tty_driver->major;
+ 	cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
+ 							     "cts-override");
++
++	instances++;
++
+ 	return 0;
+ 
+ err_out_pm_disable:
+@@ -1724,12 +1624,8 @@ err_out_clk_disable:
+ err_out_clk_dis_pclk:
+ 	clk_disable_unprepare(cdns_uart_data->pclk);
+ err_out_unregister_driver:
+-	uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
+-err_out_id:
+-	mutex_lock(&bitmap_lock);
+-	if (cdns_uart_data->id < MAX_UART_INSTANCES)
+-		clear_bit(cdns_uart_data->id, bitmap);
+-	mutex_unlock(&bitmap_lock);
++	if (!instances)
++		uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
+ 	return rc;
+ }
+ 
+@@ -1752,10 +1648,6 @@ static int cdns_uart_remove(struct platform_device *pdev)
+ #endif
+ 	rc = uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port);
+ 	port->mapbase = 0;
+-	mutex_lock(&bitmap_lock);
+-	if (cdns_uart_data->id < MAX_UART_INSTANCES)
+-		clear_bit(cdns_uart_data->id, bitmap);
+-	mutex_unlock(&bitmap_lock);
+ 	clk_disable_unprepare(cdns_uart_data->uartclk);
+ 	clk_disable_unprepare(cdns_uart_data->pclk);
+ 	pm_runtime_disable(&pdev->dev);
+@@ -1768,13 +1660,8 @@ static int cdns_uart_remove(struct platform_device *pdev)
+ 		console_port = NULL;
+ #endif
+ 
+-	/* If this is last instance major number should be initialized */
+-	mutex_lock(&bitmap_lock);
+-	if (bitmap_empty(bitmap, MAX_UART_INSTANCES))
+-		uartps_major = 0;
+-	mutex_unlock(&bitmap_lock);
+-
+-	uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
++	if (!--instances)
++		uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
+ 	return rc;
+ }
+ 
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index b99ac3ebb2b5..cc1a04191365 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -81,6 +81,7 @@
+ #include <linux/errno.h>
+ #include <linux/kd.h>
+ #include <linux/slab.h>
++#include <linux/vmalloc.h>
+ #include <linux/major.h>
+ #include <linux/mm.h>
+ #include <linux/console.h>
+@@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
+ 	/* allocate everything in one go */
+ 	memsize = cols * rows * sizeof(char32_t);
+ 	memsize += rows * sizeof(char32_t *);
+-	p = kmalloc(memsize, GFP_KERNEL);
++	p = vmalloc(memsize);
+ 	if (!p)
+ 		return NULL;
+ 
+@@ -366,7 +367,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
+ 
+ static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
+ {
+-	kfree(vc->vc_uni_screen);
++	vfree(vc->vc_uni_screen);
+ 	vc->vc_uni_screen = new_uniscr;
+ }
+ 
+@@ -1206,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ 	if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
+ 		return 0;
+ 
+-	if (new_screen_size > (4 << 20))
++	if (new_screen_size > KMALLOC_MAX_SIZE)
+ 		return -EINVAL;
+ 	newscreen = kzalloc(new_screen_size, GFP_USER);
+ 	if (!newscreen)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 84d6f7df09a4..8ca72d80501d 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -412,9 +412,12 @@ static void acm_ctrl_irq(struct urb *urb)
+ 
+ exit:
+ 	retval = usb_submit_urb(urb, GFP_ATOMIC);
+-	if (retval && retval != -EPERM)
++	if (retval && retval != -EPERM && retval != -ENODEV)
+ 		dev_err(&acm->control->dev,
+ 			"%s - usb_submit_urb failed: %d\n", __func__, retval);
++	else
++		dev_vdbg(&acm->control->dev,
++			"control resubmission terminated %d\n", retval);
+ }
+ 
+ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
+@@ -430,6 +433,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
+ 			dev_err(&acm->data->dev,
+ 				"urb %d failed submission with %d\n",
+ 				index, res);
++		} else {
++			dev_vdbg(&acm->data->dev, "intended failure %d\n", res);
+ 		}
+ 		set_bit(index, &acm->read_urbs_free);
+ 		return res;
+@@ -471,6 +476,7 @@ static void acm_read_bulk_callback(struct urb *urb)
+ 	int status = urb->status;
+ 	bool stopped = false;
+ 	bool stalled = false;
++	bool cooldown = false;
+ 
+ 	dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
+ 		rb->index, urb->actual_length, status);
+@@ -497,6 +503,14 @@ static void acm_read_bulk_callback(struct urb *urb)
+ 			__func__, status);
+ 		stopped = true;
+ 		break;
++	case -EOVERFLOW:
++	case -EPROTO:
++		dev_dbg(&acm->data->dev,
++			"%s - cooling babbling device\n", __func__);
++		usb_mark_last_busy(acm->dev);
++		set_bit(rb->index, &acm->urbs_in_error_delay);
++		cooldown = true;
++		break;
+ 	default:
+ 		dev_dbg(&acm->data->dev,
+ 			"%s - nonzero urb status received: %d\n",
+@@ -518,9 +532,11 @@ static void acm_read_bulk_callback(struct urb *urb)
+ 	 */
+ 	smp_mb__after_atomic();
+ 
+-	if (stopped || stalled) {
++	if (stopped || stalled || cooldown) {
+ 		if (stalled)
+ 			schedule_work(&acm->work);
++		else if (cooldown)
++			schedule_delayed_work(&acm->dwork, HZ / 2);
+ 		return;
+ 	}
+ 
+@@ -557,14 +573,20 @@ static void acm_softint(struct work_struct *work)
+ 	struct acm *acm = container_of(work, struct acm, work);
+ 
+ 	if (test_bit(EVENT_RX_STALL, &acm->flags)) {
+-		if (!(usb_autopm_get_interface(acm->data))) {
++		smp_mb(); /* against acm_suspend() */
++		if (!acm->susp_count) {
+ 			for (i = 0; i < acm->rx_buflimit; i++)
+ 				usb_kill_urb(acm->read_urbs[i]);
+ 			usb_clear_halt(acm->dev, acm->in);
+ 			acm_submit_read_urbs(acm, GFP_KERNEL);
+-			usb_autopm_put_interface(acm->data);
++			clear_bit(EVENT_RX_STALL, &acm->flags);
+ 		}
+-		clear_bit(EVENT_RX_STALL, &acm->flags);
++	}
++
++	if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
++		for (i = 0; i < ACM_NR; i++)
++			if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
++					acm_submit_read_urb(acm, i, GFP_NOIO);
+ 	}
+ 
+ 	if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
+@@ -1333,6 +1355,7 @@ made_compressed_probe:
+ 	acm->readsize = readsize;
+ 	acm->rx_buflimit = num_rx_buf;
+ 	INIT_WORK(&acm->work, acm_softint);
++	INIT_DELAYED_WORK(&acm->dwork, acm_softint);
+ 	init_waitqueue_head(&acm->wioctl);
+ 	spin_lock_init(&acm->write_lock);
+ 	spin_lock_init(&acm->read_lock);
+@@ -1542,6 +1565,7 @@ static void acm_disconnect(struct usb_interface *intf)
+ 
+ 	acm_kill_urbs(acm);
+ 	cancel_work_sync(&acm->work);
++	cancel_delayed_work_sync(&acm->dwork);
+ 
+ 	tty_unregister_device(acm_tty_driver, acm->minor);
+ 
+@@ -1584,6 +1608,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
+ 
+ 	acm_kill_urbs(acm);
+ 	cancel_work_sync(&acm->work);
++	cancel_delayed_work_sync(&acm->dwork);
++	acm->urbs_in_error_delay = 0;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index ca1c026382c2..cd5e9d8ab237 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -109,8 +109,11 @@ struct acm {
+ #		define EVENT_TTY_WAKEUP	0
+ #		define EVENT_RX_STALL	1
+ #		define ACM_THROTTLED	2
++#		define ACM_ERROR_DELAY	3
++	unsigned long urbs_in_error_delay;		/* these need to be restarted after a delay */
+ 	struct usb_cdc_line_coding line;		/* bits, stop, parity */
+-	struct work_struct work;			/* work queue entry for line discipline waking up */
++	struct work_struct work;			/* work queue entry for various purposes*/
++	struct delayed_work dwork;			/* for cool downs needed in error recovery */
+ 	unsigned int ctrlin;				/* input control lines (DCD, DSR, RI, break, overruns) */
+ 	unsigned int ctrlout;				/* output control lines (DTR, RTS) */
+ 	struct async_icount iocount;			/* counters for control line changes */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 54cd8ef795ec..2b6565c06c23 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1223,6 +1223,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ #ifdef CONFIG_PM
+ 			udev->reset_resume = 1;
+ #endif
++			/* Don't set the change_bits when the device
++			 * was powered off.
++			 */
++			if (test_bit(port1, hub->power_bits))
++				set_bit(port1, hub->change_bits);
+ 
+ 		} else {
+ 			/* The power session is gone; tell hub_wq */
+@@ -2723,13 +2728,11 @@ static bool use_new_scheme(struct usb_device *udev, int retry,
+ {
+ 	int old_scheme_first_port =
+ 		port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME;
+-	int quick_enumeration = (udev->speed == USB_SPEED_HIGH);
+ 
+ 	if (udev->speed >= USB_SPEED_SUPER)
+ 		return false;
+ 
+-	return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first
+-			      || quick_enumeration);
++	return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first);
+ }
+ 
+ /* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+@@ -3088,6 +3091,15 @@ static int check_port_resume_type(struct usb_device *udev,
+ 		if (portchange & USB_PORT_STAT_C_ENABLE)
+ 			usb_clear_port_feature(hub->hdev, port1,
+ 					USB_PORT_FEAT_C_ENABLE);
++
++		/*
++		 * Whatever made this reset-resume necessary may have
++		 * turned on the port1 bit in hub->change_bits.  But after
++		 * a successful reset-resume we want the bit to be clear;
++		 * if it was on it would indicate that something happened
++		 * following the reset-resume.
++		 */
++		clear_bit(port1, hub->change_bits);
+ 	}
+ 
+ 	return status;
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 5adf489428aa..02eaac7e1e34 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -588,12 +588,13 @@ void usb_sg_cancel(struct usb_sg_request *io)
+ 	int i, retval;
+ 
+ 	spin_lock_irqsave(&io->lock, flags);
+-	if (io->status) {
++	if (io->status || io->count == 0) {
+ 		spin_unlock_irqrestore(&io->lock, flags);
+ 		return;
+ 	}
+ 	/* shut everything down */
+ 	io->status = -ECONNRESET;
++	io->count++;		/* Keep the request alive until we're done */
+ 	spin_unlock_irqrestore(&io->lock, flags);
+ 
+ 	for (i = io->entries - 1; i >= 0; --i) {
+@@ -607,6 +608,12 @@ void usb_sg_cancel(struct usb_sg_request *io)
+ 			dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
+ 				 __func__, retval);
+ 	}
++
++	spin_lock_irqsave(&io->lock, flags);
++	io->count--;
++	if (!io->count)
++		complete(&io->complete);
++	spin_unlock_irqrestore(&io->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(usb_sg_cancel);
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index da30b5664ff3..3e8efe759c3e 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -430,6 +430,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Corsair K70 LUX */
+ 	{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
++	/* Corsair K70 RGB RAPDIFIRE */
++	{ USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
++	  USB_QUIRK_DELAY_CTRL_MSG },
++
+ 	/* MIDI keyboard WORLDE MINI */
+ 	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 4d3c79d90a6e..9460d42f8675 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2484,14 +2484,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
+ 
+ static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
+ {
+-	/*
+-	 * For OUT direction, host may send less than the setup
+-	 * length. Return true for all OUT requests.
+-	 */
+-	if (!req->direction)
+-		return true;
+-
+-	return req->request.actual == req->request.length;
++	return req->num_pending_sgs == 0;
+ }
+ 
+ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+@@ -2515,8 +2508,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+ 
+ 	req->request.actual = req->request.length - req->remaining;
+ 
+-	if (!dwc3_gadget_ep_request_completed(req) ||
+-			req->num_pending_sgs) {
++	if (!dwc3_gadget_ep_request_completed(req)) {
+ 		__dwc3_gadget_kick_transfer(dep);
+ 		goto out;
+ 	}
+diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
+index 971c6b92484a..171280c80228 100644
+--- a/drivers/usb/early/xhci-dbc.c
++++ b/drivers/usb/early/xhci-dbc.c
+@@ -728,19 +728,19 @@ static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
+ 	case COMP_USB_TRANSACTION_ERROR:
+ 	case COMP_STALL_ERROR:
+ 	default:
+-		if (ep_id == XDBC_EPID_OUT)
++		if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
+ 			xdbc.flags |= XDBC_FLAGS_OUT_STALL;
+-		if (ep_id == XDBC_EPID_IN)
++		if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
+ 			xdbc.flags |= XDBC_FLAGS_IN_STALL;
+ 
+ 		xdbc_trace("endpoint %d stalled\n", ep_id);
+ 		break;
+ 	}
+ 
+-	if (ep_id == XDBC_EPID_IN) {
++	if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
+ 		xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
+ 		xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
+-	} else if (ep_id == XDBC_EPID_OUT) {
++	} else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
+ 		xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
+ 	} else {
+ 		xdbc_trace("invalid endpoint id %d\n", ep_id);
+diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
+index 673686eeddd7..6e2b7266a695 100644
+--- a/drivers/usb/early/xhci-dbc.h
++++ b/drivers/usb/early/xhci-dbc.h
+@@ -120,8 +120,22 @@ struct xdbc_ring {
+ 	u32			cycle_state;
+ };
+ 
+-#define XDBC_EPID_OUT		2
+-#define XDBC_EPID_IN		3
++/*
++ * These are the "Endpoint ID" (also known as "Context Index") values for the
++ * OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data
++ * structure.
++ * According to the "eXtensible Host Controller Interface for Universal Serial
++ * Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer
++ * Rings", these should be 0 and 1, and those are the values AMD machines give
++ * you; but Intel machines seem to use the formula from section "4.5.1 Device
++ * Context Index", which is supposed to be used for the Device Context only.
++ * Luckily the values from Intel don't overlap with those from AMD, so we can
++ * just test for both.
++ */
++#define XDBC_EPID_OUT		0
++#define XDBC_EPID_IN		1
++#define XDBC_EPID_OUT_INTEL	2
++#define XDBC_EPID_IN_INTEL	3
+ 
+ struct xdbc_state {
+ 	u16			vendor;
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 767f30b86645..edfb70874c46 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1813,6 +1813,10 @@ static void ffs_data_reset(struct ffs_data *ffs)
+ 	ffs->state = FFS_READ_DESCRIPTORS;
+ 	ffs->setup_state = FFS_NO_SETUP;
+ 	ffs->flags = 0;
++
++	ffs->ms_os_descs_ext_prop_count = 0;
++	ffs->ms_os_descs_ext_prop_name_len = 0;
++	ffs->ms_os_descs_ext_prop_data_len = 0;
+ }
+ 
+ 
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index af92b2576fe9..3196de2931b1 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1306,7 +1306,47 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 					 wIndex, link_state);
+ 				goto error;
+ 			}
++
++			/*
++			 * set link to U0, steps depend on current link state.
++			 * U3: set link to U0 and wait for u3exit completion.
++			 * U1/U2:  no PLC complete event, only set link to U0.
++			 * Resume/Recovery: device initiated U0, only wait for
++			 * completion
++			 */
++			if (link_state == USB_SS_PORT_LS_U0) {
++				u32 pls = temp & PORT_PLS_MASK;
++				bool wait_u0 = false;
++
++				/* already in U0 */
++				if (pls == XDEV_U0)
++					break;
++				if (pls == XDEV_U3 ||
++				    pls == XDEV_RESUME ||
++				    pls == XDEV_RECOVERY) {
++					wait_u0 = true;
++					reinit_completion(&bus_state->u3exit_done[wIndex]);
++				}
++				if (pls <= XDEV_U3) /* U1, U2, U3 */
++					xhci_set_link_state(xhci, ports[wIndex],
++							    USB_SS_PORT_LS_U0);
++				if (!wait_u0) {
++					if (pls > XDEV_U3)
++						goto error;
++					break;
++				}
++				spin_unlock_irqrestore(&xhci->lock, flags);
++				if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex],
++								 msecs_to_jiffies(100)))
++					xhci_dbg(xhci, "missing U0 port change event for port %d\n",
++						 wIndex);
++				spin_lock_irqsave(&xhci->lock, flags);
++				temp = readl(ports[wIndex]->addr);
++				break;
++			}
++
+ 			if (link_state == USB_SS_PORT_LS_U3) {
++				int retries = 16;
+ 				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ 						wIndex + 1);
+ 				if (slot_id) {
+@@ -1317,17 +1357,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 					xhci_stop_device(xhci, slot_id, 1);
+ 					spin_lock_irqsave(&xhci->lock, flags);
+ 				}
+-			}
+-
+-			xhci_set_link_state(xhci, ports[wIndex], link_state);
+-
+-			spin_unlock_irqrestore(&xhci->lock, flags);
+-			msleep(20); /* wait device to enter */
+-			spin_lock_irqsave(&xhci->lock, flags);
+-
+-			temp = readl(ports[wIndex]->addr);
+-			if (link_state == USB_SS_PORT_LS_U3)
++				xhci_set_link_state(xhci, ports[wIndex], USB_SS_PORT_LS_U3);
++				spin_unlock_irqrestore(&xhci->lock, flags);
++				while (retries--) {
++					usleep_range(4000, 8000);
++					temp = readl(ports[wIndex]->addr);
++					if ((temp & PORT_PLS_MASK) == XDEV_U3)
++						break;
++				}
++				spin_lock_irqsave(&xhci->lock, flags);
++				temp = readl(ports[wIndex]->addr);
+ 				bus_state->suspended_ports |= 1 << wIndex;
++			}
+ 			break;
+ 		case USB_PORT_FEAT_POWER:
+ 			/*
+@@ -1528,6 +1569,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ 		}
+ 		if ((temp & PORT_RC))
+ 			reset_change = true;
++		if (temp & PORT_OC)
++			status = 1;
+ 	}
+ 	if (!status && !reset_change) {
+ 		xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+@@ -1593,6 +1636,13 @@ retry:
+ 				 port_index);
+ 			goto retry;
+ 		}
++		/* bail out if port detected a over-current condition */
++		if (t1 & PORT_OC) {
++			bus_state->bus_suspended = 0;
++			spin_unlock_irqrestore(&xhci->lock, flags);
++			xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n");
++			return -EBUSY;
++		}
+ 		/* suspend ports in U0, or bail out for new connect changes */
+ 		if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
+ 			if ((t1 & PORT_CSC) && wake_enabled) {
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 884c601bfa15..9764122c9cdf 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -2552,6 +2552,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 		xhci->usb3_rhub.bus_state.resume_done[i] = 0;
+ 		/* Only the USB 2.0 completions will ever be used. */
+ 		init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
++		init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
+ 	}
+ 
+ 	if (scratchpad_alloc(xhci, flags))
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index d23f7408c81f..2fbc00c0a6e8 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -547,6 +547,23 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ 				stream_id);
+ 		return;
+ 	}
++	/*
++	 * A cancelled TD can complete with a stall if HW cached the trb.
++	 * In this case driver can't find cur_td, but if the ring is empty we
++	 * can move the dequeue pointer to the current enqueue position.
++	 */
++	if (!cur_td) {
++		if (list_empty(&ep_ring->td_list)) {
++			state->new_deq_seg = ep_ring->enq_seg;
++			state->new_deq_ptr = ep_ring->enqueue;
++			state->new_cycle_state = ep_ring->cycle_state;
++			goto done;
++		} else {
++			xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n");
++			return;
++		}
++	}
++
+ 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 			"Finding endpoint context");
+@@ -592,6 +609,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ 	state->new_deq_seg = new_seg;
+ 	state->new_deq_ptr = new_deq;
+ 
++done:
+ 	/* Don't update the ring cycle state for the producer (us). */
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 			"Cycle state = 0x%x", state->new_cycle_state);
+@@ -1677,6 +1695,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 	     (portsc & PORT_PLS_MASK) == XDEV_U1 ||
+ 	     (portsc & PORT_PLS_MASK) == XDEV_U2)) {
+ 		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
++		complete(&bus_state->u3exit_done[hcd_portnum]);
+ 		/* We've just brought the device into U0/1/2 through either the
+ 		 * Resume state after a device remote wakeup, or through the
+ 		 * U3Exit state after a host-initiated resume.  If it's a device
+@@ -1851,8 +1870,8 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+ 
+ 	if (reset_type == EP_HARD_RESET) {
+ 		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
+-		xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);
+-		xhci_clear_hub_tt_buffer(xhci, td, ep);
++		xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id,
++					  td);
+ 	}
+ 	xhci_ring_cmd_db(xhci);
+ }
+@@ -1973,11 +1992,18 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 	if (trb_comp_code == COMP_STALL_ERROR ||
+ 		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+ 						trb_comp_code)) {
+-		/* Issue a reset endpoint command to clear the host side
+-		 * halt, followed by a set dequeue command to move the
+-		 * dequeue pointer past the TD.
+-		 * The class driver clears the device side halt later.
++		/*
++		 * xhci internal endpoint state will go to a "halt" state for
++		 * any stall, including default control pipe protocol stall.
++		 * To clear the host side halt we need to issue a reset endpoint
++		 * command, followed by a set dequeue command to move past the
++		 * TD.
++		 * Class drivers clear the device side halt from a functional
++		 * stall later. Hub TT buffer should only be cleared for FS/LS
++		 * devices behind HS hubs for functional stalls.
+ 		 */
++		if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
++			xhci_clear_hub_tt_buffer(xhci, td, ep);
+ 		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
+ 					ep_ring->stream_id, td, EP_HARD_RESET);
+ 	} else {
+@@ -2530,6 +2556,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 				xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
+ 					 slot_id, ep_index);
+ 			}
++			if (trb_comp_code == COMP_STALL_ERROR ||
++			    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
++							      trb_comp_code)) {
++				xhci_cleanup_halted_endpoint(xhci, slot_id,
++							     ep_index,
++							     ep_ring->stream_id,
++							     NULL,
++							     EP_HARD_RESET);
++			}
+ 			goto cleanup;
+ 		}
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index fe38275363e0..bee5deccc83d 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3031,19 +3031,19 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+ 			added_ctxs, added_ctxs);
+ }
+ 
+-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
+-			       unsigned int stream_id, struct xhci_td *td)
++void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
++			       unsigned int ep_index, unsigned int stream_id,
++			       struct xhci_td *td)
+ {
+ 	struct xhci_dequeue_state deq_state;
+-	struct usb_device *udev = td->urb->dev;
+ 
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ 			"Cleaning up stalled endpoint ring");
+ 	/* We need to move the HW's dequeue pointer past this TD,
+ 	 * or it will attempt to resend it on the next doorbell ring.
+ 	 */
+-	xhci_find_new_dequeue_state(xhci, udev->slot_id,
+-			ep_index, stream_id, td, &deq_state);
++	xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
++				    &deq_state);
+ 
+ 	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
+ 		return;
+@@ -3054,7 +3054,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
+ 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ 				"Queueing new dequeue state");
+-		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
++		xhci_queue_new_dequeue_state(xhci, slot_id,
+ 				ep_index, &deq_state);
+ 	} else {
+ 		/* Better hope no one uses the input context between now and the
+@@ -3065,7 +3065,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ 				"Setting up input context for "
+ 				"configure endpoint command");
+-		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
++		xhci_setup_input_ctx_for_quirk(xhci, slot_id,
+ 				ep_index, &deq_state);
+ 	}
+ }
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 3ecee10fdcdc..02f972e464ab 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1694,6 +1694,7 @@ struct xhci_bus_state {
+ 	/* Which ports are waiting on RExit to U0 transition. */
+ 	unsigned long		rexit_ports;
+ 	struct completion	rexit_done[USB_MAXCHILDREN];
++	struct completion	u3exit_done[USB_MAXCHILDREN];
+ };
+ 
+ 
+@@ -2115,8 +2116,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+ 		unsigned int slot_id, unsigned int ep_index,
+ 		struct xhci_dequeue_state *deq_state);
+-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
+-		unsigned int stream_id, struct xhci_td *td);
++void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
++			       unsigned int ep_index, unsigned int stream_id,
++			       struct xhci_td *td);
+ void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
+ void xhci_handle_command_timeout(struct work_struct *work);
+ 
+diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
+index 2ab9600d0898..fc8a5da4a07c 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb.c
++++ b/drivers/usb/misc/sisusbvga/sisusb.c
+@@ -1199,18 +1199,18 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
+ /* High level: Gfx (indexed) register access */
+ 
+ #ifdef CONFIG_USB_SISUSBVGA_CON
+-int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
++int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data)
+ {
+ 	return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
+ }
+ 
+-int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data)
++int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data)
+ {
+ 	return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
+ }
+ #endif
+ 
+-int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
++int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
+ 		u8 index, u8 data)
+ {
+ 	int ret;
+@@ -1220,7 +1220,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
+ 	return ret;
+ }
+ 
+-int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
++int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
+ 		u8 index, u8 *data)
+ {
+ 	int ret;
+@@ -1230,7 +1230,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
+ 	return ret;
+ }
+ 
+-int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
++int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx,
+ 		u8 myand, u8 myor)
+ {
+ 	int ret;
+@@ -1245,7 +1245,7 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
+ }
+ 
+ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
+-		int port, u8 idx, u8 data, u8 mask)
++		u32 port, u8 idx, u8 data, u8 mask)
+ {
+ 	int ret;
+ 	u8 tmp;
+@@ -1258,13 +1258,13 @@ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
+ 	return ret;
+ }
+ 
+-int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
++int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
+ 		u8 index, u8 myor)
+ {
+ 	return sisusb_setidxregandor(sisusb, port, index, 0xff, myor);
+ }
+ 
+-int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
++int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
+ 		u8 idx, u8 myand)
+ {
+ 	return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00);
+@@ -2785,8 +2785,8 @@ static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig)
+ static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
+ 		struct sisusb_command *y, unsigned long arg)
+ {
+-	int	retval, port, length;
+-	u32	address;
++	int	retval, length;
++	u32	port, address;
+ 
+ 	/* All our commands require the device
+ 	 * to be initialized.
+diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
+index 1782c759c4ad..ace09985dae4 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
++++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
+@@ -812,17 +812,17 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] = {
+ int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo);
+ int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo);
+ 
+-extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data);
+-extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data);
+-extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
++extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data);
++extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data);
++extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
+ 			    u8 index, u8 data);
+-extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
++extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
+ 			    u8 index, u8 * data);
+-extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port,
++extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port,
+ 				 u8 idx, u8 myand, u8 myor);
+-extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
++extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
+ 			      u8 index, u8 myor);
+-extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
++extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
+ 			       u8 idx, u8 myand);
+ 
+ void sisusb_delete(struct kref *kref);
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 3670fda02c34..d592071119ba 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -81,6 +81,19 @@ static void uas_free_streams(struct uas_dev_info *devinfo);
+ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
+ 				int status);
+ 
++/*
++ * This driver needs its own workqueue, as we need to control memory allocation.
++ *
++ * In the course of error handling and power management uas_wait_for_pending_cmnds()
++ * needs to flush pending work items. In these contexts we cannot allocate memory
++ * by doing block IO as we would deadlock. For the same reason we cannot wait
++ * for anything allocating memory not heeding these constraints.
++ *
++ * So we have to control all work items that can be on the workqueue we flush.
++ * Hence we cannot share a queue and need our own.
++ */
++static struct workqueue_struct *workqueue;
++
+ static void uas_do_work(struct work_struct *work)
+ {
+ 	struct uas_dev_info *devinfo =
+@@ -109,7 +122,7 @@ static void uas_do_work(struct work_struct *work)
+ 		if (!err)
+ 			cmdinfo->state &= ~IS_IN_WORK_LIST;
+ 		else
+-			schedule_work(&devinfo->work);
++			queue_work(workqueue, &devinfo->work);
+ 	}
+ out:
+ 	spin_unlock_irqrestore(&devinfo->lock, flags);
+@@ -134,7 +147,7 @@ static void uas_add_work(struct uas_cmd_info *cmdinfo)
+ 
+ 	lockdep_assert_held(&devinfo->lock);
+ 	cmdinfo->state |= IS_IN_WORK_LIST;
+-	schedule_work(&devinfo->work);
++	queue_work(workqueue, &devinfo->work);
+ }
+ 
+ static void uas_zap_pending(struct uas_dev_info *devinfo, int result)
+@@ -190,6 +203,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
+ 	struct uas_cmd_info *ci = (void *)&cmnd->SCp;
+ 	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ 
++	if (status == -ENODEV) /* too late */
++		return;
++
+ 	scmd_printk(KERN_INFO, cmnd,
+ 		    "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
+ 		    prefix, status, cmdinfo->uas_tag,
+@@ -1226,7 +1242,31 @@ static struct usb_driver uas_driver = {
+ 	.id_table = uas_usb_ids,
+ };
+ 
+-module_usb_driver(uas_driver);
++static int __init uas_init(void)
++{
++	int rv;
++
++	workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0);
++	if (!workqueue)
++		return -ENOMEM;
++
++	rv = usb_register(&uas_driver);
++	if (rv) {
++		destroy_workqueue(workqueue);
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++static void __exit uas_exit(void)
++{
++	usb_deregister(&uas_driver);
++	destroy_workqueue(workqueue);
++}
++
++module_init(uas_init);
++module_exit(uas_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_IMPORT_NS(USB_STORAGE);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 1880f3e13f57..f6c3681fa2e9 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2323,6 +2323,13 @@ UNUSUAL_DEV(  0x3340, 0xffff, 0x0000, 0x0000,
+ 		USB_SC_DEVICE,USB_PR_DEVICE,NULL,
+ 		US_FL_MAX_SECTORS_64 ),
+ 
++/* Reported by Cyril Roelandt <tipecaml@gmail.com> */
++UNUSUAL_DEV(  0x357d, 0x7788, 0x0114, 0x0114,
++		"JMicron",
++		"USB to ATA/ATAPI Bridge",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_BROKEN_FUA ),
++
+ /* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
+ UNUSUAL_DEV(  0x4102, 0x1020, 0x0100,  0x0100,
+ 		"iRiver",
+diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
+index 2e45eb479386..f241037df7cb 100644
+--- a/drivers/usb/typec/bus.c
++++ b/drivers/usb/typec/bus.c
+@@ -208,7 +208,10 @@ EXPORT_SYMBOL_GPL(typec_altmode_vdm);
+ const struct typec_altmode *
+ typec_altmode_get_partner(struct typec_altmode *adev)
+ {
+-	return adev ? &to_altmode(adev)->partner->adev : NULL;
++	if (!adev || !to_altmode(adev)->partner)
++		return NULL;
++
++	return &to_altmode(adev)->partner->adev;
+ }
+ EXPORT_SYMBOL_GPL(typec_altmode_get_partner);
+ 
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index f3087ef8265c..c033dfb2dd8a 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -3759,6 +3759,14 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
+ 		 */
+ 		break;
+ 
++	case PORT_RESET:
++	case PORT_RESET_WAIT_OFF:
++		/*
++		 * State set back to default mode once the timer completes.
++		 * Ignore CC changes here.
++		 */
++		break;
++
+ 	default:
+ 		if (tcpm_port_is_disconnected(port))
+ 			tcpm_set_state(port, unattached_state(port), 0);
+@@ -3820,6 +3828,15 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
+ 	case SRC_TRY_DEBOUNCE:
+ 		/* Do nothing, waiting for sink detection */
+ 		break;
++
++	case PORT_RESET:
++	case PORT_RESET_WAIT_OFF:
++		/*
++		 * State set back to default mode once the timer completes.
++		 * Ignore vbus changes here.
++		 */
++		break;
++
+ 	default:
+ 		break;
+ 	}
+@@ -3873,10 +3890,19 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
+ 	case PORT_RESET_WAIT_OFF:
+ 		tcpm_set_state(port, tcpm_default_state(port), 0);
+ 		break;
++
+ 	case SRC_TRY_WAIT:
+ 	case SRC_TRY_DEBOUNCE:
+ 		/* Do nothing, waiting for sink detection */
+ 		break;
++
++	case PORT_RESET:
++		/*
++		 * State set back to default mode once the timer completes.
++		 * Ignore vbus changes here.
++		 */
++		break;
++
+ 	default:
+ 		if (port->pwr_role == TYPEC_SINK &&
+ 		    port->attached)
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index 8b5c742f24e8..7e4cd34a8c20 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -282,6 +282,7 @@ static int watchdog_start(struct watchdog_device *wdd)
+ 	if (err == 0) {
+ 		set_bit(WDOG_ACTIVE, &wdd->status);
+ 		wd_data->last_keepalive = started_at;
++		wd_data->last_hw_keepalive = started_at;
+ 		watchdog_update_worker(wdd);
+ 	}
+ 
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 28ae0c134700..d050acc1fd5d 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1973,8 +1973,12 @@ retry_locked:
+ 		}
+ 
+ 		/* want more caps from mds? */
+-		if (want & ~(cap->mds_wanted | cap->issued))
+-			goto ack;
++		if (want & ~cap->mds_wanted) {
++			if (want & ~(cap->mds_wanted | cap->issued))
++				goto ack;
++			if (!__cap_is_valid(cap))
++				goto ack;
++		}
+ 
+ 		/* things we might delay */
+ 		if ((cap->issued & ~retain) == 0)
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index b6bfa94332c3..79dc06881e78 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -315,6 +315,11 @@ static struct dentry *__get_parent(struct super_block *sb,
+ 
+ 	req->r_num_caps = 1;
+ 	err = ceph_mdsc_do_request(mdsc, NULL, req);
++	if (err) {
++		ceph_mdsc_put_request(req);
++		return ERR_PTR(err);
++	}
++
+ 	inode = req->r_target_inode;
+ 	if (inode)
+ 		ihold(inode);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 788344b5949e..cd0e7f5005cb 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -693,6 +693,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
+ 	if (smb3_encryption_required(tcon))
+ 		flags |= CIFS_TRANSFORM_REQ;
+ 
++	if (!server->ops->new_lease_key)
++		return -EIO;
++
++	server->ops->new_lease_key(pfid);
++
+ 	memset(rqst, 0, sizeof(rqst));
+ 	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
+ 	memset(rsp_iov, 0, sizeof(rsp_iov));
+diff --git a/fs/coredump.c b/fs/coredump.c
+index f8296a82d01d..408418e6aa13 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -211,6 +211,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+ 			return -ENOMEM;
+ 		(*argv)[(*argc)++] = 0;
+ 		++pat_ptr;
++		if (!(*pat_ptr))
++			return -ENOMEM;
+ 	}
+ 
+ 	/* Repeat as long as we have more pattern to process and more output
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index c3b11a715082..5cf91322de0f 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -1312,6 +1312,7 @@ nfsd4_run_cb_work(struct work_struct *work)
+ 		container_of(work, struct nfsd4_callback, cb_work);
+ 	struct nfs4_client *clp = cb->cb_clp;
+ 	struct rpc_clnt *clnt;
++	int flags;
+ 
+ 	if (cb->cb_need_restart) {
+ 		cb->cb_need_restart = false;
+@@ -1340,7 +1341,8 @@ nfsd4_run_cb_work(struct work_struct *work)
+ 	}
+ 
+ 	cb->cb_msg.rpc_cred = clp->cl_cb_cred;
+-	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
++	flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN;
++	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
+ 			cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
+ }
+ 
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index 7dc800cce354..c663202da8de 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -266,7 +266,8 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
+ 		if (start < offset + dump->size) {
+ 			tsz = min(offset + (u64)dump->size - start, (u64)size);
+ 			buf = dump->buf + start - offset;
+-			if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
++			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
++							tsz)) {
+ 				ret = -EFAULT;
+ 				goto out_unlock;
+ 			}
+@@ -624,7 +625,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
+ 		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
+ 		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
+ 		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
+-						kaddr, tsz))
++						kaddr, 0, tsz))
+ 			goto fail;
+ 
+ 		size -= tsz;
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 2094386af8ac..68fea439d974 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1861,7 +1861,8 @@ xfs_init_zones(void)
+ 
+ 	xfs_ili_zone = kmem_cache_create("xfs_ili",
+ 					 sizeof(struct xfs_inode_log_item), 0,
+-					 SLAB_MEM_SPREAD, NULL);
++					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
++					 NULL);
+ 	if (!xfs_ili_zone)
+ 		goto out_destroy_inode_zone;
+ 
+diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
+index 862ce0019eba..d91c1e21dc70 100644
+--- a/include/linux/iio/iio.h
++++ b/include/linux/iio/iio.h
+@@ -598,7 +598,7 @@ void iio_device_unregister(struct iio_dev *indio_dev);
+  * 0 on success, negative error number on failure.
+  */
+ #define devm_iio_device_register(dev, indio_dev) \
+-	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE);
++	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
+ int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
+ 			       struct module *this_mod);
+ void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index bcb9b2ac0791..b2a7159f66da 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -1039,7 +1039,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+ 			start = slot + 1;
+ 	}
+ 
+-	if (gfn >= memslots[start].base_gfn &&
++	if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
+ 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
+ 		atomic_set(&slots->lru_slot, start);
+ 		return &memslots[start];
+diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
+index 0507a162ccd0..a95d3cc74d79 100644
+--- a/include/linux/vmalloc.h
++++ b/include/linux/vmalloc.h
+@@ -137,7 +137,7 @@ extern void vunmap(const void *addr);
+ 
+ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
+ 				       unsigned long uaddr, void *kaddr,
+-				       unsigned long size);
++				       unsigned long pgoff, unsigned long size);
+ 
+ extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ 							unsigned long pgoff);
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 77e6b5a83b06..eec6d0a6ae61 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -5969,7 +5969,9 @@ enum rate_control_capabilities {
+ struct rate_control_ops {
+ 	unsigned long capa;
+ 	const char *name;
+-	void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
++	void *(*alloc)(struct ieee80211_hw *hw);
++	void (*add_debugfs)(struct ieee80211_hw *hw, void *priv,
++			    struct dentry *debugfsdir);
+ 	void (*free)(void *priv);
+ 
+ 	void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index a5ea27df3c2b..2edb73c27962 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -51,7 +51,7 @@ extern struct inet_hashinfo tcp_hashinfo;
+ extern struct percpu_counter tcp_orphan_count;
+ void tcp_time_wait(struct sock *sk, int state, int timeo);
+ 
+-#define MAX_TCP_HEADER	(128 + MAX_HEADER)
++#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
+ #define MAX_TCP_OPTION_SPACE 40
+ #define TCP_MIN_SND_MSS		48
+ #define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
+diff --git a/ipc/util.c b/ipc/util.c
+index fe61df53775a..2d70f25f64b8 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -764,13 +764,13 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
+ 			total++;
+ 	}
+ 
++	*new_pos = pos + 1;
+ 	if (total >= ids->in_use)
+ 		return NULL;
+ 
+ 	for (; pos < ipc_mni; pos++) {
+ 		ipc = idr_find(&ids->ipcs_idr, pos);
+ 		if (ipc != NULL) {
+-			*new_pos = pos + 1;
+ 			rcu_read_lock();
+ 			ipc_lock_object(ipc);
+ 			return ipc;
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 9ddfe2aa6671..7fe3b69bc02a 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1326,6 +1326,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
+ 		if (!audit_enabled && msg_type != AUDIT_USER_AVC)
+ 			return 0;
++		/* exit early if there isn't at least one character to print */
++		if (data_len < 2)
++			return -EINVAL;
+ 
+ 		err = audit_filter(msg_type, AUDIT_FILTER_USER);
+ 		if (err == 1) { /* match or error */
+diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
+index ac7956c38f69..4b24275e306a 100644
+--- a/kernel/dma/direct.c
++++ b/kernel/dma/direct.c
+@@ -39,7 +39,8 @@ static inline struct page *dma_direct_to_page(struct device *dev,
+ 
+ u64 dma_direct_get_required_mask(struct device *dev)
+ {
+-	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
++	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
++	u64 max_dma = phys_to_dma_direct(dev, phys);
+ 
+ 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 243717177f44..533c19348189 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6734,9 +6734,12 @@ static u64 perf_virt_to_phys(u64 virt)
+ 		 * Try IRQ-safe __get_user_pages_fast first.
+ 		 * If failed, leave phys_addr as 0.
+ 		 */
+-		if ((current->mm != NULL) &&
+-		    (__get_user_pages_fast(virt, 1, 0, &p) == 1))
+-			phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
++		if (current->mm != NULL) {
++			pagefault_disable();
++			if (__get_user_pages_fast(virt, 1, 0, &p) == 1)
++				phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
++			pagefault_enable();
++		}
+ 
+ 		if (p)
+ 			put_page(p);
+diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
+index e5eb5ea7ea59..cc4ee482d3fb 100644
+--- a/kernel/gcov/fs.c
++++ b/kernel/gcov/fs.c
+@@ -108,9 +108,9 @@ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos)
+ {
+ 	struct gcov_iterator *iter = data;
+ 
++	(*pos)++;
+ 	if (gcov_iter_next(iter))
+ 		return NULL;
+-	(*pos)++;
+ 
+ 	return iter;
+ }
+diff --git a/kernel/signal.c b/kernel/signal.c
+index e58a6c619824..7938c60e11dd 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1993,8 +1993,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
+ 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
+ 			sig = 0;
+ 	}
++	/*
++	 * Send with __send_signal as si_pid and si_uid are in the
++	 * parent's namespaces.
++	 */
+ 	if (valid_signal(sig) && sig)
+-		__group_send_sig_info(sig, &info, tsk->parent);
++		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
+ 	__wake_up_parent(tsk, tsk->parent);
+ 	spin_unlock_irqrestore(&psig->siglock, flags);
+ 
+diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
+index 3ab8720aa2f8..b9e6c3648be1 100644
+--- a/lib/raid6/test/Makefile
++++ b/lib/raid6/test/Makefile
+@@ -35,13 +35,13 @@ endif
+ ifeq ($(IS_X86),yes)
+         OBJS   += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
+         CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" |		\
+-                    gcc -c -x assembler - >&/dev/null &&	\
++                    gcc -c -x assembler - >/dev/null 2>&1 &&	\
+                     rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
+         CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" |	\
+-                    gcc -c -x assembler - >&/dev/null &&	\
++                    gcc -c -x assembler - >/dev/null 2>&1 &&	\
+                     rm ./-.o && echo -DCONFIG_AS_AVX2=1)
+ 	CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" |          \
+-		    gcc -c -x assembler - >&/dev/null &&        \
++		    gcc -c -x assembler - >/dev/null 2>&1 &&	\
+ 		    rm ./-.o && echo -DCONFIG_AS_AVX512=1)
+ else ifeq ($(HAS_NEON),yes)
+         OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index dd8737a94bec..0366085f37ed 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4910,8 +4910,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
+ {
+ 	pgd_t *pgd;
+ 	p4d_t *p4d;
+-	pud_t *pud;
+-	pmd_t *pmd;
++	pud_t *pud, pud_entry;
++	pmd_t *pmd, pmd_entry;
+ 
+ 	pgd = pgd_offset(mm, addr);
+ 	if (!pgd_present(*pgd))
+@@ -4921,17 +4921,19 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
+ 		return NULL;
+ 
+ 	pud = pud_offset(p4d, addr);
+-	if (sz != PUD_SIZE && pud_none(*pud))
++	pud_entry = READ_ONCE(*pud);
++	if (sz != PUD_SIZE && pud_none(pud_entry))
+ 		return NULL;
+ 	/* hugepage or swap? */
+-	if (pud_huge(*pud) || !pud_present(*pud))
++	if (pud_huge(pud_entry) || !pud_present(pud_entry))
+ 		return (pte_t *)pud;
+ 
+ 	pmd = pmd_offset(pud, addr);
+-	if (sz != PMD_SIZE && pmd_none(*pmd))
++	pmd_entry = READ_ONCE(*pmd);
++	if (sz != PMD_SIZE && pmd_none(pmd_entry))
+ 		return NULL;
+ 	/* hugepage or swap? */
+-	if (pmd_huge(*pmd) || !pmd_present(*pmd))
++	if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry))
+ 		return (pte_t *)pmd;
+ 
+ 	return NULL;
+diff --git a/mm/ksm.c b/mm/ksm.c
+index d17c7d57d0d8..c55b89da4f55 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -2112,8 +2112,16 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
+ 
+ 		down_read(&mm->mmap_sem);
+ 		vma = find_mergeable_vma(mm, rmap_item->address);
+-		err = try_to_merge_one_page(vma, page,
+-					    ZERO_PAGE(rmap_item->address));
++		if (vma) {
++			err = try_to_merge_one_page(vma, page,
++					ZERO_PAGE(rmap_item->address));
++		} else {
++			/*
++			 * If the vma is out of date, we do not need to
++			 * continue.
++			 */
++			err = 0;
++		}
+ 		up_read(&mm->mmap_sem);
+ 		/*
+ 		 * In case of failure, the page was not really empty, so we
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 4bb30ed6c8d2..8cbd8c1bfe15 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -27,6 +27,7 @@
+ #include <linux/swapops.h>
+ #include <linux/shmem_fs.h>
+ #include <linux/mmu_notifier.h>
++#include <linux/sched/mm.h>
+ 
+ #include <asm/tlb.h>
+ 
+@@ -1090,6 +1091,23 @@ int do_madvise(unsigned long start, size_t len_in, int behavior)
+ 	if (write) {
+ 		if (down_write_killable(&current->mm->mmap_sem))
+ 			return -EINTR;
++
++		/*
++		 * We may have stolen the mm from another process
++		 * that is undergoing core dumping.
++		 *
++		 * Right now that's io_ring, in the future it may
++		 * be remote process management and not "current"
++		 * at all.
++		 *
++		 * We need to fix core dumping to not do this,
++		 * but for now we have the mmget_still_valid()
++		 * model.
++		 */
++		if (!mmget_still_valid(current->mm)) {
++			up_write(&current->mm->mmap_sem);
++			return -EINTR;
++		}
+ 	} else {
+ 		down_read(&current->mm->mmap_sem);
+ 	}
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 6b8eeb0ecee5..cf39e15242c1 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -34,6 +34,7 @@
+ #include <linux/llist.h>
+ #include <linux/bitops.h>
+ #include <linux/rbtree_augmented.h>
++#include <linux/overflow.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/tlbflush.h>
+@@ -3054,6 +3055,7 @@ finished:
+  * @vma:		vma to cover
+  * @uaddr:		target user address to start at
+  * @kaddr:		virtual address of vmalloc kernel memory
++ * @pgoff:		offset from @kaddr to start at
+  * @size:		size of map area
+  *
+  * Returns:	0 for success, -Exxx on failure
+@@ -3066,9 +3068,15 @@ finished:
+  * Similar to remap_pfn_range() (see mm/memory.c)
+  */
+ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+-				void *kaddr, unsigned long size)
++				void *kaddr, unsigned long pgoff,
++				unsigned long size)
+ {
+ 	struct vm_struct *area;
++	unsigned long off;
++	unsigned long end_index;
++
++	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
++		return -EINVAL;
+ 
+ 	size = PAGE_ALIGN(size);
+ 
+@@ -3082,8 +3090,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+ 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
+ 		return -EINVAL;
+ 
+-	if (kaddr + size > area->addr + get_vm_area_size(area))
++	if (check_add_overflow(size, off, &end_index) ||
++	    end_index > get_vm_area_size(area))
+ 		return -EINVAL;
++	kaddr += off;
+ 
+ 	do {
+ 		struct page *page = vmalloc_to_page(kaddr);
+@@ -3122,7 +3132,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ 						unsigned long pgoff)
+ {
+ 	return remap_vmalloc_range_partial(vma, vma->vm_start,
+-					   addr + (pgoff << PAGE_SHIFT),
++					   addr, pgoff,
+ 					   vma->vm_end - vma->vm_start);
+ }
+ EXPORT_SYMBOL(remap_vmalloc_range);
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index a803cdd9400a..ee0f3b2823e0 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -2012,7 +2012,7 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
+ 
+ 	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
+ 		struct fib_info *next_fi = fa->fa_info;
+-		struct fib_nh *nh;
++		struct fib_nh_common *nhc;
+ 
+ 		if (fa->fa_slen != slen)
+ 			continue;
+@@ -2035,8 +2035,8 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
+ 		    fa->fa_type != RTN_UNICAST)
+ 			continue;
+ 
+-		nh = fib_info_nh(next_fi, 0);
+-		if (!nh->fib_nh_gw4 || nh->fib_nh_scope != RT_SCOPE_LINK)
++		nhc = fib_info_nhc(next_fi, 0);
++		if (!nhc->nhc_gw_family || nhc->nhc_scope != RT_SCOPE_LINK)
+ 			continue;
+ 
+ 		fib_alias_accessed(fa);
+diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
+index 89ba7c87de5d..30ddb9dc9398 100644
+--- a/net/ipv4/xfrm4_output.c
++++ b/net/ipv4/xfrm4_output.c
+@@ -58,9 +58,7 @@ int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
+ {
+ 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ 
+-#ifdef CONFIG_NETFILTER
+ 	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+-#endif
+ 
+ 	return xfrm_output(sk, skb);
+ }
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index debdaeba5d8c..18d05403d3b5 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -183,15 +183,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 					retv = -EBUSY;
+ 					break;
+ 				}
+-			} else if (sk->sk_protocol == IPPROTO_TCP) {
+-				if (sk->sk_prot != &tcpv6_prot) {
+-					retv = -EBUSY;
+-					break;
+-				}
+-				break;
+-			} else {
++			}
++			if (sk->sk_protocol == IPPROTO_TCP &&
++			    sk->sk_prot != &tcpv6_prot) {
++				retv = -EBUSY;
+ 				break;
+ 			}
++			if (sk->sk_protocol != IPPROTO_TCP)
++				break;
+ 			if (sk->sk_state != TCP_ESTABLISHED) {
+ 				retv = -ENOTCONN;
+ 				break;
+diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
+index fbe51d40bd7e..e34167f790e6 100644
+--- a/net/ipv6/xfrm6_output.c
++++ b/net/ipv6/xfrm6_output.c
+@@ -111,9 +111,7 @@ int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
+ {
+ 	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ 
+-#ifdef CONFIG_NETFILTER
+ 	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
+-#endif
+ 
+ 	return xfrm_output(sk, skb);
+ }
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index a14aef11ffb8..4945d6e6d133 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -1161,8 +1161,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 	local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
+ 				   IEEE80211_TX_STATUS_HEADROOM);
+ 
+-	debugfs_hw_add(local);
+-
+ 	/*
+ 	 * if the driver doesn't specify a max listen interval we
+ 	 * use 5 which should be a safe default
+@@ -1254,6 +1252,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 	if (result < 0)
+ 		goto fail_wiphy_register;
+ 
++	debugfs_hw_add(local);
++	rate_control_add_debugfs(local);
++
+ 	rtnl_lock();
+ 
+ 	/* add one default STA interface if supported */
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index a1e9fc7878aa..b051f125d3af 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -214,17 +214,16 @@ static ssize_t rcname_read(struct file *file, char __user *userbuf,
+ 				       ref->ops->name, len);
+ }
+ 
+-static const struct file_operations rcname_ops = {
++const struct file_operations rcname_ops = {
+ 	.read = rcname_read,
+ 	.open = simple_open,
+ 	.llseek = default_llseek,
+ };
+ #endif
+ 
+-static struct rate_control_ref *rate_control_alloc(const char *name,
+-					    struct ieee80211_local *local)
++static struct rate_control_ref *
++rate_control_alloc(const char *name, struct ieee80211_local *local)
+ {
+-	struct dentry *debugfsdir = NULL;
+ 	struct rate_control_ref *ref;
+ 
+ 	ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
+@@ -234,13 +233,7 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
+ 	if (!ref->ops)
+ 		goto free;
+ 
+-#ifdef CONFIG_MAC80211_DEBUGFS
+-	debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
+-	local->debugfs.rcdir = debugfsdir;
+-	debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops);
+-#endif
+-
+-	ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
++	ref->priv = ref->ops->alloc(&local->hw);
+ 	if (!ref->priv)
+ 		goto free;
+ 	return ref;
+diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
+index 5397c6dad056..79b44d3db171 100644
+--- a/net/mac80211/rate.h
++++ b/net/mac80211/rate.h
+@@ -60,6 +60,29 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta)
+ #endif
+ }
+ 
++extern const struct file_operations rcname_ops;
++
++static inline void rate_control_add_debugfs(struct ieee80211_local *local)
++{
++#ifdef CONFIG_MAC80211_DEBUGFS
++	struct dentry *debugfsdir;
++
++	if (!local->rate_ctrl)
++		return;
++
++	if (!local->rate_ctrl->ops->add_debugfs)
++		return;
++
++	debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
++	local->debugfs.rcdir = debugfsdir;
++	debugfs_create_file("name", 0400, debugfsdir,
++			    local->rate_ctrl, &rcname_ops);
++
++	local->rate_ctrl->ops->add_debugfs(&local->hw, local->rate_ctrl->priv,
++					   debugfsdir);
++#endif
++}
++
+ void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata);
+ 
+ /* Get a reference to the rate control algorithm. If `name' is NULL, get the
+diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
+index 694a31978a04..5dc3e5bc4e64 100644
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -1635,7 +1635,7 @@ minstrel_ht_init_cck_rates(struct minstrel_priv *mp)
+ }
+ 
+ static void *
+-minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
++minstrel_ht_alloc(struct ieee80211_hw *hw)
+ {
+ 	struct minstrel_priv *mp;
+ 
+@@ -1673,7 +1673,17 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+ 	mp->update_interval = HZ / 10;
+ 	mp->new_avg = true;
+ 
++	minstrel_ht_init_cck_rates(mp);
++
++	return mp;
++}
++
+ #ifdef CONFIG_MAC80211_DEBUGFS
++static void minstrel_ht_add_debugfs(struct ieee80211_hw *hw, void *priv,
++				    struct dentry *debugfsdir)
++{
++	struct minstrel_priv *mp = priv;
++
+ 	mp->fixed_rate_idx = (u32) -1;
+ 	debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
+ 			   &mp->fixed_rate_idx);
+@@ -1681,12 +1691,8 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+ 			   &mp->sample_switch);
+ 	debugfs_create_bool("new_avg", S_IRUGO | S_IWUSR, debugfsdir,
+ 			   &mp->new_avg);
+-#endif
+-
+-	minstrel_ht_init_cck_rates(mp);
+-
+-	return mp;
+ }
++#endif
+ 
+ static void
+ minstrel_ht_free(void *priv)
+@@ -1725,6 +1731,7 @@ static const struct rate_control_ops mac80211_minstrel_ht = {
+ 	.alloc = minstrel_ht_alloc,
+ 	.free = minstrel_ht_free,
+ #ifdef CONFIG_MAC80211_DEBUGFS
++	.add_debugfs = minstrel_ht_add_debugfs,
+ 	.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
+ #endif
+ 	.get_expected_throughput = minstrel_ht_get_expected_throughput,
+diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
+index d41335bad1f8..89cd9de21594 100644
+--- a/net/netrom/nr_route.c
++++ b/net/netrom/nr_route.c
+@@ -208,6 +208,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
+ 		/* refcount initialized at 1 */
+ 		spin_unlock_bh(&nr_node_list_lock);
+ 
++		nr_neigh_put(nr_neigh);
+ 		return 0;
+ 	}
+ 	nr_node_lock(nr_node);
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index e726159cfcfa..4340f25fe390 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1895,7 +1895,8 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
+ 		struct hlist_head *head = &info->limits[i];
+ 		struct ovs_ct_limit *ct_limit;
+ 
+-		hlist_for_each_entry_rcu(ct_limit, head, hlist_node)
++		hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
++					 lockdep_ovsl_is_held())
+ 			kfree_rcu(ct_limit, rcu);
+ 	}
+ 	kfree(ovs_net->ct_limit_info->limits);
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 07a7dd185995..c39f3c6c061d 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -2466,8 +2466,10 @@ static void __net_exit ovs_exit_net(struct net *dnet)
+ 	struct net *net;
+ 	LIST_HEAD(head);
+ 
+-	ovs_ct_exit(dnet);
+ 	ovs_lock();
++
++	ovs_ct_exit(dnet);
++
+ 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
+ 		__dp_destroy(dp);
+ 
+diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
+index b1da5589a0c6..c48f91075b5c 100644
+--- a/net/sched/sch_etf.c
++++ b/net/sched/sch_etf.c
+@@ -82,7 +82,7 @@ static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
+ 	if (q->skip_sock_check)
+ 		goto skip;
+ 
+-	if (!sk)
++	if (!sk || !sk_fullsock(sk))
+ 		return false;
+ 
+ 	if (!sock_flag(sk, SOCK_TXTIME))
+@@ -137,8 +137,9 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
+ 	struct sock_exterr_skb *serr;
+ 	struct sk_buff *clone;
+ 	ktime_t txtime = skb->tstamp;
++	struct sock *sk = skb->sk;
+ 
+-	if (!skb->sk || !(skb->sk->sk_txtime_report_errors))
++	if (!sk || !sk_fullsock(sk) || !(sk->sk_txtime_report_errors))
+ 		return;
+ 
+ 	clone = skb_clone(skb, GFP_ATOMIC);
+@@ -154,7 +155,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
+ 	serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */
+ 	serr->ee.ee_info = txtime; /* low part of tstamp */
+ 
+-	if (sock_queue_err_skb(skb->sk, clone))
++	if (sock_queue_err_skb(sk, clone))
+ 		kfree_skb(clone);
+ }
+ 
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index de3c077733a7..298557744818 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -1028,6 +1028,8 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
+ 
+ 	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
+ 	xprt->xpt_ops->xpo_detach(xprt);
++	if (xprt->xpt_bc_xprt)
++		xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt);
+ 
+ 	spin_lock_bh(&serv->sv_lock);
+ 	list_del_init(&xprt->xpt_list);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+index 908e78bb87c6..cf80394b2db3 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+@@ -242,6 +242,8 @@ static void
+ xprt_rdma_bc_close(struct rpc_xprt *xprt)
+ {
+ 	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
++
++	xprt_disconnect_done(xprt);
+ 	xprt->cwnd = RPC_CWNDSHIFT;
+ }
+ 
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index d86c664ea6af..882f46fadd01 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2714,6 +2714,7 @@ static int bc_send_request(struct rpc_rqst *req)
+ 
+ static void bc_close(struct rpc_xprt *xprt)
+ {
++	xprt_disconnect_done(xprt);
+ }
+ 
+ /*
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index c8c47fc72653..8c47ded2edb6 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -1712,6 +1712,7 @@ exit:
+ 	case -EBUSY:
+ 		this_cpu_inc(stats->stat[STAT_ASYNC]);
+ 		*skb = NULL;
++		tipc_aead_put(aead);
+ 		return rc;
+ 	default:
+ 		this_cpu_inc(stats->stat[STAT_NOK]);
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 0c88778c88b5..d50be9a3d479 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -2037,6 +2037,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+ 		n = tipc_node_find_by_id(net, ehdr->id);
+ 	}
+ 	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
++	tipc_node_put(n);
+ 	if (!skb)
+ 		return;
+ 
+@@ -2089,7 +2090,7 @@ rcv:
+ 	/* Check/update node state before receiving */
+ 	if (unlikely(skb)) {
+ 		if (unlikely(skb_linearize(skb)))
+-			goto discard;
++			goto out_node_put;
+ 		tipc_node_write_lock(n);
+ 		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
+ 			if (le->link) {
+@@ -2118,6 +2119,7 @@ rcv:
+ 	if (!skb_queue_empty(&xmitq))
+ 		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
+ 
++out_node_put:
+ 	tipc_node_put(n);
+ discard:
+ 	kfree_skb(skb);
+diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
+index 00e782335cb0..25bf72ee6cad 100644
+--- a/net/x25/x25_dev.c
++++ b/net/x25/x25_dev.c
+@@ -115,8 +115,10 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
+ 		goto drop;
+ 	}
+ 
+-	if (!pskb_may_pull(skb, 1))
++	if (!pskb_may_pull(skb, 1)) {
++		x25_neigh_put(nb);
+ 		return 0;
++	}
+ 
+ 	switch (skb->data[0]) {
+ 
+diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c
+index cc86bf6566e4..9894693f3be1 100644
+--- a/samples/vfio-mdev/mdpy.c
++++ b/samples/vfio-mdev/mdpy.c
+@@ -418,7 +418,7 @@ static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+ 		return -EINVAL;
+ 
+ 	return remap_vmalloc_range_partial(vma, vma->vm_start,
+-					   mdev_state->memblk,
++					   mdev_state->memblk, 0,
+ 					   vma->vm_end - vma->vm_start);
+ }
+ 
+diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
+index 82773cc35d35..0f8c77f84711 100644
+--- a/scripts/kconfig/qconf.cc
++++ b/scripts/kconfig/qconf.cc
+@@ -627,7 +627,7 @@ void ConfigList::updateMenuList(ConfigItem *parent, struct menu* menu)
+ 			last = item;
+ 			continue;
+ 		}
+-	hide:
++hide:
+ 		if (item && item->menu == child) {
+ 			last = parent->firstChild();
+ 			if (last == item)
+@@ -692,7 +692,7 @@ void ConfigList::updateMenuList(ConfigList *parent, struct menu* menu)
+ 			last = item;
+ 			continue;
+ 		}
+-	hide:
++hide:
+ 		if (item && item->menu == child) {
+ 			last = (ConfigItem*)parent->topLevelItem(0);
+ 			if (last == item)
+@@ -1225,10 +1225,11 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
+ {
+ 	QMenu* popup = Parent::createStandardContextMenu(pos);
+ 	QAction* action = new QAction("Show Debug Info", popup);
+-	  action->setCheckable(true);
+-	  connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
+-	  connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
+-	  action->setChecked(showDebug());
++
++	action->setCheckable(true);
++	connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
++	connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
++	action->setChecked(showDebug());
+ 	popup->addSeparator();
+ 	popup->addAction(action);
+ 	return popup;
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index ba3e2da14cef..6d0ca48ae9a5 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -16,6 +16,8 @@
+ #include <linux/keyctl.h>
+ #include <linux/refcount.h>
+ #include <linux/compat.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
+ 
+ struct iovec;
+ 
+@@ -349,4 +351,14 @@ static inline void key_check(const struct key *key)
+ 
+ #endif
+ 
++/*
++ * Helper function to clear and free a kvmalloc'ed memory object.
++ */
++static inline void __kvzfree(const void *addr, size_t len)
++{
++	if (addr) {
++		memset((void *)addr, 0, len);
++		kvfree(addr);
++	}
++}
+ #endif /* _INTERNAL_H */
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 106e16f9006b..5e01192e222a 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -339,7 +339,7 @@ long keyctl_update_key(key_serial_t id,
+ 	payload = NULL;
+ 	if (plen) {
+ 		ret = -ENOMEM;
+-		payload = kmalloc(plen, GFP_KERNEL);
++		payload = kvmalloc(plen, GFP_KERNEL);
+ 		if (!payload)
+ 			goto error;
+ 
+@@ -360,7 +360,7 @@ long keyctl_update_key(key_serial_t id,
+ 
+ 	key_ref_put(key_ref);
+ error2:
+-	kzfree(payload);
++	__kvzfree(payload, plen);
+ error:
+ 	return ret;
+ }
+@@ -827,7 +827,8 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+ 	struct key *key;
+ 	key_ref_t key_ref;
+ 	long ret;
+-	char *key_data;
++	char *key_data = NULL;
++	size_t key_data_len;
+ 
+ 	/* find the key first */
+ 	key_ref = lookup_user_key(keyid, 0, 0);
+@@ -878,24 +879,51 @@ can_read_key:
+ 	 * Allocating a temporary buffer to hold the keys before
+ 	 * transferring them to user buffer to avoid potential
+ 	 * deadlock involving page fault and mmap_sem.
++	 *
++	 * key_data_len = (buflen <= PAGE_SIZE)
++	 *		? buflen : actual length of key data
++	 *
++	 * This prevents allocating arbitrary large buffer which can
++	 * be much larger than the actual key length. In the latter case,
++	 * at least 2 passes of this loop is required.
+ 	 */
+-	key_data = kmalloc(buflen, GFP_KERNEL);
++	key_data_len = (buflen <= PAGE_SIZE) ? buflen : 0;
++	for (;;) {
++		if (key_data_len) {
++			key_data = kvmalloc(key_data_len, GFP_KERNEL);
++			if (!key_data) {
++				ret = -ENOMEM;
++				goto key_put_out;
++			}
++		}
+ 
+-	if (!key_data) {
+-		ret = -ENOMEM;
+-		goto key_put_out;
+-	}
+-	ret = __keyctl_read_key(key, key_data, buflen);
++		ret = __keyctl_read_key(key, key_data, key_data_len);
++
++		/*
++		 * Read methods will just return the required length without
++		 * any copying if the provided length isn't large enough.
++		 */
++		if (ret <= 0 || ret > buflen)
++			break;
++
++		/*
++		 * The key may change (unlikely) in between 2 consecutive
++		 * __keyctl_read_key() calls. In this case, we reallocate
++		 * a larger buffer and redo the key read when
++		 * key_data_len < ret <= buflen.
++		 */
++		if (ret > key_data_len) {
++			if (unlikely(key_data))
++				__kvzfree(key_data, key_data_len);
++			key_data_len = ret;
++			continue;	/* Allocate buffer */
++		}
+ 
+-	/*
+-	 * Read methods will just return the required length without
+-	 * any copying if the provided length isn't large enough.
+-	 */
+-	if (ret > 0 && ret <= buflen) {
+ 		if (copy_to_user(buffer, key_data, ret))
+ 			ret = -EFAULT;
++		break;
+ 	}
+-	kzfree(key_data);
++	__kvzfree(key_data, key_data_len);
+ 
+ key_put_out:
+ 	key_put(key);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index f41d8b7864c1..af21e9583c0d 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2076,7 +2076,6 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
+  * should be ignored from the beginning.
+  */
+ static const struct snd_pci_quirk driver_blacklist[] = {
+-	SND_PCI_QUIRK(0x1043, 0x874f, "ASUS ROG Zenith II / Strix", 0),
+ 	SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0),
+ 	SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0),
+ 	{}
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 5119a9ae3d8a..8bc4d66ff986 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -38,6 +38,10 @@ static bool static_hdmi_pcm;
+ module_param(static_hdmi_pcm, bool, 0644);
+ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+ 
++static bool enable_acomp = true;
++module_param(enable_acomp, bool, 0444);
++MODULE_PARM_DESC(enable_acomp, "Enable audio component binding (default=yes)");
++
+ struct hdmi_spec_per_cvt {
+ 	hda_nid_t cvt_nid;
+ 	int assigned;
+@@ -2638,6 +2642,11 @@ static void generic_acomp_init(struct hda_codec *codec,
+ {
+ 	struct hdmi_spec *spec = codec->spec;
+ 
++	if (!enable_acomp) {
++		codec_info(codec, "audio component disabled by module option\n");
++		return;
++	}
++
+ 	spec->port2pin = port2pin;
+ 	setup_drm_audio_ops(codec, ops);
+ 	if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops,
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 02b9830d4b5f..f2fccf267b48 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -369,6 +369,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 	case 0x10ec0233:
+ 	case 0x10ec0235:
+ 	case 0x10ec0236:
++	case 0x10ec0245:
+ 	case 0x10ec0255:
+ 	case 0x10ec0256:
+ 	case 0x10ec0257:
+@@ -789,9 +790,11 @@ static void alc_ssid_check(struct hda_codec *codec, const hda_nid_t *ports)
+ {
+ 	if (!alc_subsystem_id(codec, ports)) {
+ 		struct alc_spec *spec = codec->spec;
+-		codec_dbg(codec,
+-			  "realtek: Enable default setup for auto mode as fallback\n");
+-		spec->init_amp = ALC_INIT_DEFAULT;
++		if (spec->init_amp == ALC_INIT_UNDEFINED) {
++			codec_dbg(codec,
++				  "realtek: Enable default setup for auto mode as fallback\n");
++			spec->init_amp = ALC_INIT_DEFAULT;
++		}
+ 	}
+ }
+ 
+@@ -8071,6 +8074,7 @@ static int patch_alc269(struct hda_codec *codec)
+ 		spec->gen.mixer_nid = 0;
+ 		break;
+ 	case 0x10ec0215:
++	case 0x10ec0245:
+ 	case 0x10ec0285:
+ 	case 0x10ec0289:
+ 		spec->codec_variant = ALC269_TYPE_ALC215;
+@@ -9332,6 +9336,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ 	HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
++	HDA_CODEC_ENTRY(0x10ec0245, "ALC245", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269),
+diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
+index f883c9340eee..df8f7994d3b7 100644
+--- a/sound/soc/intel/atom/sst-atom-controls.c
++++ b/sound/soc/intel/atom/sst-atom-controls.c
+@@ -966,7 +966,9 @@ static int sst_set_be_modules(struct snd_soc_dapm_widget *w,
+ 	dev_dbg(c->dev, "Enter: widget=%s\n", w->name);
+ 
+ 	if (SND_SOC_DAPM_EVENT_ON(event)) {
++		mutex_lock(&drv->lock);
+ 		ret = sst_send_slot_map(drv);
++		mutex_unlock(&drv->lock);
+ 		if (ret)
+ 			return ret;
+ 		ret = sst_send_pipe_module_params(w, k);
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 6bd9ae813be2..d14d5f7db168 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -591,6 +591,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{
++		/* MPMAN MPWIN895CL */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MPMAN"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MPWIN8900CL"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_MONO_SPEAKER |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{	/* MSI S100 tablet */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
+diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
+index c0d422d0ab94..d7dc80ede892 100644
+--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
+@@ -73,7 +73,7 @@ struct q6asm_dai_data {
+ };
+ 
+ static const struct snd_pcm_hardware q6asm_dai_hardware_capture = {
+-	.info =                 (SNDRV_PCM_INFO_MMAP |
++	.info =                 (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BATCH |
+ 				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ 				SNDRV_PCM_INFO_MMAP_VALID |
+ 				SNDRV_PCM_INFO_INTERLEAVED |
+@@ -95,7 +95,7 @@ static const struct snd_pcm_hardware q6asm_dai_hardware_capture = {
+ };
+ 
+ static struct snd_pcm_hardware q6asm_dai_hardware_playback = {
+-	.info =                 (SNDRV_PCM_INFO_MMAP |
++	.info =                 (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BATCH |
+ 				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ 				SNDRV_PCM_INFO_MMAP_VALID |
+ 				SNDRV_PCM_INFO_INTERLEAVED |
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 17962564866d..c8fd65318d5e 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -423,7 +423,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
+ 
+ 			memset(&template, 0, sizeof(template));
+ 			template.reg = e->reg;
+-			template.mask = e->mask << e->shift_l;
++			template.mask = e->mask;
+ 			template.shift = e->shift_l;
+ 			template.off_val = snd_soc_enum_item_to_val(e, 0);
+ 			template.on_val = template.off_val;
+@@ -546,8 +546,22 @@ static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol,
+ 	if (data->value == value)
+ 		return false;
+ 
+-	if (data->widget)
+-		data->widget->on_val = value;
++	if (data->widget) {
++		switch (dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->id) {
++		case snd_soc_dapm_switch:
++		case snd_soc_dapm_mixer:
++		case snd_soc_dapm_mixer_named_ctl:
++			data->widget->on_val = value & data->widget->mask;
++			break;
++		case snd_soc_dapm_demux:
++		case snd_soc_dapm_mux:
++			data->widget->on_val = value >> data->widget->shift;
++			break;
++		default:
++			data->widget->on_val = value;
++			break;
++		}
++	}
+ 
+ 	data->value = value;
+ 
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 9f5cb4ed3a0c..928c8761a962 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -247,6 +247,52 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
+ 	return 0;
+ }
+ 
++/*
++ * Many Focusrite devices supports a limited set of sampling rates per
++ * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
++ * descriptor which has a non-standard bLength = 10.
++ */
++static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
++					struct audioformat *fp,
++					unsigned int rate)
++{
++	struct usb_interface *iface;
++	struct usb_host_interface *alts;
++	unsigned char *fmt;
++	unsigned int max_rate;
++
++	iface = usb_ifnum_to_if(chip->dev, fp->iface);
++	if (!iface)
++		return true;
++
++	alts = &iface->altsetting[fp->altset_idx];
++	fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen,
++				      NULL, UAC_FORMAT_TYPE);
++	if (!fmt)
++		return true;
++
++	if (fmt[0] == 10) { /* bLength */
++		max_rate = combine_quad(&fmt[6]);
++
++		/* Validate max rate */
++		if (max_rate != 48000 &&
++		    max_rate != 96000 &&
++		    max_rate != 192000 &&
++		    max_rate != 384000) {
++
++			usb_audio_info(chip,
++				"%u:%d : unexpected max rate: %u\n",
++				fp->iface, fp->altsetting, max_rate);
++
++			return true;
++		}
++
++		return rate <= max_rate;
++	}
++
++	return true;
++}
++
+ /*
+  * Helper function to walk the array of sample rate triplets reported by
+  * the device. The problem is that we need to parse whole array first to
+@@ -283,6 +329,11 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
+ 		}
+ 
+ 		for (rate = min; rate <= max; rate += res) {
++			/* Filter out invalid rates on Focusrite devices */
++			if (USB_ID_VENDOR(chip->usb_id) == 0x1235 &&
++			    !focusrite_valid_sample_rate(chip, fp, rate))
++				goto skip_rate;
++
+ 			if (fp->rate_table)
+ 				fp->rate_table[nr_rates] = rate;
+ 			if (!fp->rate_min || rate < fp->rate_min)
+@@ -297,6 +348,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
+ 				break;
+ 			}
+ 
++skip_rate:
+ 			/* avoid endless loop */
+ 			if (res == 0)
+ 				break;
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 7e2e1fc5b9f0..7a2961ad60de 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1755,8 +1755,10 @@ static void build_connector_control(struct usb_mixer_interface *mixer,
+ {
+ 	struct snd_kcontrol *kctl;
+ 	struct usb_mixer_elem_info *cval;
++	const struct usbmix_name_map *map;
+ 
+-	if (check_ignored_ctl(find_map(imap, term->id, 0)))
++	map = find_map(imap, term->id, 0);
++	if (check_ignored_ctl(map))
+ 		return;
+ 
+ 	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
+@@ -1788,8 +1790,12 @@ static void build_connector_control(struct usb_mixer_interface *mixer,
+ 		usb_mixer_elem_info_free(cval);
+ 		return;
+ 	}
+-	get_connector_control_name(mixer, term, is_input, kctl->id.name,
+-				   sizeof(kctl->id.name));
++
++	if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)))
++		strlcat(kctl->id.name, " Jack", sizeof(kctl->id.name));
++	else
++		get_connector_control_name(mixer, term, is_input, kctl->id.name,
++					   sizeof(kctl->id.name));
+ 	kctl->private_free = snd_usb_mixer_elem_free;
+ 	snd_usb_mixer_add_control(&cval->head, kctl);
+ }
+@@ -3090,6 +3096,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
+ 		if (map->id == state.chip->usb_id) {
+ 			state.map = map->map;
+ 			state.selector_map = map->selector_map;
++			mixer->connector_map = map->connector_map;
+ 			mixer->ignore_ctl_error |= map->ignore_ctl_error;
+ 			break;
+ 		}
+@@ -3171,10 +3178,32 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
+ 	return 0;
+ }
+ 
++static int delegate_notify(struct usb_mixer_interface *mixer, int unitid,
++			   u8 *control, u8 *channel)
++{
++	const struct usbmix_connector_map *map = mixer->connector_map;
++
++	if (!map)
++		return unitid;
++
++	for (; map->id; map++) {
++		if (map->id == unitid) {
++			if (control && map->control)
++				*control = map->control;
++			if (channel && map->channel)
++				*channel = map->channel;
++			return map->delegated_id;
++		}
++	}
++	return unitid;
++}
++
+ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
+ {
+ 	struct usb_mixer_elem_list *list;
+ 
++	unitid = delegate_notify(mixer, unitid, NULL, NULL);
++
+ 	for_each_mixer_elem(list, mixer, unitid) {
+ 		struct usb_mixer_elem_info *info =
+ 			mixer_elem_list_to_info(list);
+@@ -3244,6 +3273,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
+ 		return;
+ 	}
+ 
++	unitid = delegate_notify(mixer, unitid, &control, &channel);
++
+ 	for_each_mixer_elem(list, mixer, unitid)
+ 		count++;
+ 
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index 65d6d08c96f5..41ec9dc4139b 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -6,6 +6,13 @@
+ 
+ struct media_mixer_ctl;
+ 
++struct usbmix_connector_map {
++	u8 id;
++	u8 delegated_id;
++	u8 control;
++	u8 channel;
++};
++
+ struct usb_mixer_interface {
+ 	struct snd_usb_audio *chip;
+ 	struct usb_host_interface *hostif;
+@@ -18,6 +25,9 @@ struct usb_mixer_interface {
+ 	/* the usb audio specification version this interface complies to */
+ 	int protocol;
+ 
++	/* optional connector delegation map */
++	const struct usbmix_connector_map *connector_map;
++
+ 	/* Sound Blaster remote control stuff */
+ 	const struct rc_config *rc_cfg;
+ 	u32 rc_code;
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index b4e77000f441..0260c750e156 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -27,6 +27,7 @@ struct usbmix_ctl_map {
+ 	u32 id;
+ 	const struct usbmix_name_map *map;
+ 	const struct usbmix_selector_map *selector_map;
++	const struct usbmix_connector_map *connector_map;
+ 	int ignore_ctl_error;
+ };
+ 
+@@ -369,6 +370,33 @@ static const struct usbmix_name_map asus_rog_map[] = {
+ 	{}
+ };
+ 
++/* TRX40 mobos with Realtek ALC1220-VB */
++static const struct usbmix_name_map trx40_mobo_map[] = {
++	{ 18, NULL }, /* OT, IEC958 - broken response, disabled */
++	{ 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
++	{ 16, "Speaker" },		/* OT */
++	{ 22, "Speaker Playback" },	/* FU */
++	{ 7, "Line" },			/* IT */
++	{ 19, "Line Capture" },		/* FU */
++	{ 17, "Front Headphone" },	/* OT */
++	{ 23, "Front Headphone Playback" },	/* FU */
++	{ 8, "Mic" },			/* IT */
++	{ 20, "Mic Capture" },		/* FU */
++	{ 9, "Front Mic" },		/* IT */
++	{ 21, "Front Mic Capture" },	/* FU */
++	{ 24, "IEC958 Playback" },	/* FU */
++	{}
++};
++
++static const struct usbmix_connector_map trx40_mobo_connector_map[] = {
++	{ 10, 16 },	/* (Back) Speaker */
++	{ 11, 17 },	/* Front Headphone */
++	{ 13, 7 },	/* Line */
++	{ 14, 8 },	/* Mic */
++	{ 15, 9 },	/* Front Mic */
++	{}
++};
++
+ /*
+  * Control map entries
+  */
+@@ -500,7 +528,8 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 	},
+ 	{	/* Gigabyte TRX40 Aorus Pro WiFi */
+ 		.id = USB_ID(0x0414, 0xa002),
+-		.map = asus_rog_map,
++		.map = trx40_mobo_map,
++		.connector_map = trx40_mobo_connector_map,
+ 	},
+ 	{	/* ASUS ROG Zenith II */
+ 		.id = USB_ID(0x0b05, 0x1916),
+@@ -512,11 +541,13 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 	},
+ 	{	/* MSI TRX40 Creator */
+ 		.id = USB_ID(0x0db0, 0x0d64),
+-		.map = asus_rog_map,
++		.map = trx40_mobo_map,
++		.connector_map = trx40_mobo_connector_map,
+ 	},
+ 	{	/* MSI TRX40 */
+ 		.id = USB_ID(0x0db0, 0x543d),
+-		.map = asus_rog_map,
++		.map = trx40_mobo_map,
++		.connector_map = trx40_mobo_connector_map,
+ 	},
+ 	{ 0 } /* terminator */
+ };
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index c237e24f08d9..0f072426b84c 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1508,11 +1508,15 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
+ 
+ 	/* use known values for that card: interface#1 altsetting#1 */
+ 	iface = usb_ifnum_to_if(chip->dev, 1);
+-	if (!iface || iface->num_altsetting < 2)
+-		return -EINVAL;
++	if (!iface || iface->num_altsetting < 2) {
++		err = -EINVAL;
++		goto end;
++	}
+ 	alts = &iface->altsetting[1];
+-	if (get_iface_desc(alts)->bNumEndpoints < 1)
+-		return -EINVAL;
++	if (get_iface_desc(alts)->bNumEndpoints < 1) {
++		err = -EINVAL;
++		goto end;
++	}
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	err = snd_usb_ctl_msg(chip->dev,
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index d187aa6d50db..8c2f5c23e1b4 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3592,5 +3592,61 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 		}
+ 	}
+ },
++{
++	/*
++	 * Pioneer DJ DJM-250MK2
++	 * PCM is 8 channels out @ 48 fixed (endpoints 0x01).
++	 * The output from computer to the mixer is usable.
++	 *
++	 * The input (phono or line to computer) is not working.
++	 * It should be at endpoint 0x82 and probably also 8 channels,
++	 * but it seems that it works only with Pioneer proprietary software.
++	 * Even on officially supported OS, the Audacity was unable to record
++	 * and Mixxx to recognize the control vinyls.
++	 */
++	USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0017),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 8, // outputs
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x01,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC,
++					.rates = SNDRV_PCM_RATE_48000,
++					.rate_min = 48000,
++					.rate_max = 48000,
++					.nr_rates = 1,
++					.rate_table = (unsigned int[]) { 48000 }
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++
++#define ALC1220_VB_DESKTOP(vend, prod) { \
++	USB_DEVICE(vend, prod),	\
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \
++		.vendor_name = "Realtek", \
++		.product_name = "ALC1220-VB-DT", \
++		.profile_name = "Realtek-ALC1220-VB-Desktop", \
++		.ifnum = QUIRK_NO_INTERFACE \
++	} \
++}
++ALC1220_VB_DESKTOP(0x0414, 0xa002), /* Gigabyte TRX40 Aorus Pro WiFi */
++ALC1220_VB_DESKTOP(0x0db0, 0x0d64), /* MSI TRX40 Creator */
++ALC1220_VB_DESKTOP(0x0db0, 0x543d), /* MSI TRX40 */
++#undef ALC1220_VB_DESKTOP
+ 
+ #undef USB_DEVICE_VENDOR_SPEC
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index 772f6f3ccbb1..00074af5873c 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -681,6 +681,8 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
+ 			us->submitted =	2*NOOF_SETRATE_URBS;
+ 			for (i = 0; i < NOOF_SETRATE_URBS; ++i) {
+ 				struct urb *urb = us->urb[i];
++				if (!urb)
++					continue;
+ 				if (urb->status) {
+ 					if (!err)
+ 						err = -ENODEV;
+diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
+index 6d47345a310b..c364e4be5e6e 100644
+--- a/tools/lib/bpf/netlink.c
++++ b/tools/lib/bpf/netlink.c
+@@ -289,6 +289,8 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
+ 
+ static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags)
+ {
++	flags &= XDP_FLAGS_MODES;
++
+ 	if (info->attach_mode != XDP_ATTACHED_MULTI && !flags)
+ 		return info->prog_id;
+ 	if (flags & XDP_FLAGS_DRV_MODE)
+diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
+index dbebf05f5931..47f9cc9dcd94 100644
+--- a/tools/testing/nvdimm/Kbuild
++++ b/tools/testing/nvdimm/Kbuild
+@@ -21,8 +21,8 @@ DRIVERS := ../../../drivers
+ NVDIMM_SRC := $(DRIVERS)/nvdimm
+ ACPI_SRC := $(DRIVERS)/acpi/nfit
+ DAX_SRC := $(DRIVERS)/dax
+-ccflags-y := -I$(src)/$(NVDIMM_SRC)/
+-ccflags-y += -I$(src)/$(ACPI_SRC)/
++ccflags-y := -I$(srctree)/drivers/nvdimm/
++ccflags-y += -I$(srctree)/drivers/acpi/nfit/
+ 
+ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
+ obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
+diff --git a/tools/testing/nvdimm/test/Kbuild b/tools/testing/nvdimm/test/Kbuild
+index fb3c3d7cdb9b..75baebf8f4ba 100644
+--- a/tools/testing/nvdimm/test/Kbuild
++++ b/tools/testing/nvdimm/test/Kbuild
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+-ccflags-y := -I$(src)/../../../../drivers/nvdimm/
+-ccflags-y += -I$(src)/../../../../drivers/acpi/nfit/
++ccflags-y := -I$(srctree)/drivers/nvdimm/
++ccflags-y += -I$(srctree)/drivers/acpi/nfit/
+ 
+ obj-m += nfit_test.o
+ obj-m += nfit_test_iomap.o
+diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
+index bf6422a6af7f..a8ee5c4d41eb 100644
+--- a/tools/testing/nvdimm/test/nfit.c
++++ b/tools/testing/nvdimm/test/nfit.c
+@@ -3164,7 +3164,9 @@ static __init int nfit_test_init(void)
+ 	mcsafe_test();
+ 	dax_pmem_test();
+ 	dax_pmem_core_test();
++#ifdef CONFIG_DEV_DAX_PMEM_COMPAT
+ 	dax_pmem_compat_test();
++#endif
+ 
+ 	nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
+ 
+diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
+index 8b944cf042f6..315a43111e04 100755
+--- a/tools/testing/selftests/kmod/kmod.sh
++++ b/tools/testing/selftests/kmod/kmod.sh
+@@ -505,18 +505,23 @@ function test_num()
+ 	fi
+ }
+ 
+-function get_test_count()
++function get_test_data()
+ {
+ 	test_num $1
+-	TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
++	local field_num=$(echo $1 | sed 's/^0*//')
++	echo $ALL_TESTS | awk '{print $'$field_num'}'
++}
++
++function get_test_count()
++{
++	TEST_DATA=$(get_test_data $1)
+ 	LAST_TWO=${TEST_DATA#*:*}
+ 	echo ${LAST_TWO%:*}
+ }
+ 
+ function get_test_enabled()
+ {
+-	test_num $1
+-	TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
++	TEST_DATA=$(get_test_data $1)
+ 	echo ${TEST_DATA#*:*:}
+ }
+ 
+diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
+index 796670ebc65b..6560ed796ac4 100755
+--- a/tools/testing/selftests/net/fib_nexthops.sh
++++ b/tools/testing/selftests/net/fib_nexthops.sh
+@@ -749,6 +749,29 @@ ipv4_fcnal_runtime()
+ 	run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ 	log_test $? 0 "Ping - multipath"
+ 
++	run_cmd "$IP ro delete 172.16.101.1/32 nhid 122"
++
++	#
++	# multiple default routes
++	# - tests fib_select_default
++	run_cmd "$IP nexthop add id 501 via 172.16.1.2 dev veth1"
++	run_cmd "$IP ro add default nhid 501"
++	run_cmd "$IP ro add default via 172.16.1.3 dev veth1 metric 20"
++	run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
++	log_test $? 0 "Ping - multiple default routes, nh first"
++
++	# flip the order
++	run_cmd "$IP ro del default nhid 501"
++	run_cmd "$IP ro del default via 172.16.1.3 dev veth1 metric 20"
++	run_cmd "$IP ro add default via 172.16.1.2 dev veth1 metric 20"
++	run_cmd "$IP nexthop replace id 501 via 172.16.1.3 dev veth1"
++	run_cmd "$IP ro add default nhid 501 metric 20"
++	run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
++	log_test $? 0 "Ping - multiple default routes, nh second"
++
++	run_cmd "$IP nexthop delete nhid 501"
++	run_cmd "$IP ro del default"
++
+ 	#
+ 	# IPv4 with blackhole nexthops
+ 	#
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index b7616704b55e..84205c3a55eb 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -618,16 +618,22 @@ fib_nexthop_test()
+ 
+ fib_suppress_test()
+ {
++	echo
++	echo "FIB rule with suppress_prefixlength"
++	setup
++
+ 	$IP link add dummy1 type dummy
+ 	$IP link set dummy1 up
+ 	$IP -6 route add default dev dummy1
+ 	$IP -6 rule add table main suppress_prefixlength 0
+-	ping -f -c 1000 -W 1 1234::1 || true
++	ping -f -c 1000 -W 1 1234::1 >/dev/null 2>&1
+ 	$IP -6 rule del table main suppress_prefixlength 0
+ 	$IP link del dummy1
+ 
+ 	# If we got here without crashing, we're good.
+-	return 0
++	log_test 0 0 "FIB rule suppress test"
++
++	cleanup
+ }
+ 
+ ################################################################################
+diff --git a/tools/vm/Makefile b/tools/vm/Makefile
+index 20f6cf04377f..9860622cbb15 100644
+--- a/tools/vm/Makefile
++++ b/tools/vm/Makefile
+@@ -1,6 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for vm tools
+ #
++include ../scripts/Makefile.include
++
+ TARGETS=page-types slabinfo page_owner_sort
+ 
+ LIB_DIR = ../lib/api



Thread overview: 30+ messages
2020-04-29 17:55 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2020-06-17 16:41 [gentoo-commits] proj/linux-patches:5.6 commit in: / Mike Pagano
2020-06-10 19:41 Mike Pagano
2020-06-07 21:54 Mike Pagano
2020-06-03 11:44 Mike Pagano
2020-05-27 16:32 Mike Pagano
2020-05-20 23:13 Mike Pagano
2020-05-20 11:35 Mike Pagano
2020-05-14 11:34 Mike Pagano
2020-05-13 16:48 Mike Pagano
2020-05-13 12:06 Mike Pagano
2020-05-11 22:46 Mike Pagano
2020-05-09 19:45 Mike Pagano
2020-05-06 11:47 Mike Pagano
2020-05-02 19:25 Mike Pagano
2020-05-02 13:26 Mike Pagano
2020-04-23 11:56 Mike Pagano
2020-04-21 11:24 Mike Pagano
2020-04-17 14:50 Mike Pagano
2020-04-15 15:40 Mike Pagano
2020-04-13 12:21 Mike Pagano
2020-04-12 15:29 Mike Pagano
2020-04-08 17:39 Mike Pagano
2020-04-08 12:45 Mike Pagano
2020-04-02 11:37 Mike Pagano
2020-04-02 11:35 Mike Pagano
2020-04-01 12:06 Mike Pagano
2020-03-30 12:31 Mike Pagano
2020-03-30 11:33 Mike Pagano
2020-03-30 11:15 Mike Pagano
